[DAPHNE-191] Refactor CUDA buffer mgmt (aka introducing Object Meta Data)

This commit substantially changes how external storage buffers (specifically CUDA memory) are handled. The noteworthy changes are:
* Factor out CUDA allocations from DenseMatrix (one of the initial motivations of issue daphne-eu#191)
* Introduce a mechanism to handle several storage backends and track ranges of a Structure's data.
* To make use of the mechanism, an AllocationDescriptor is passed to create() and getValues() (at the moment only DenseMatrix is supported); see the usage sketch after this list.
* Allocation descriptors need to implement the IAllocationDescriptor interface, which decouples backend-specific dependencies.
* AllocationDescriptorHost and AllocationDescriptorCUDA are implemented at the moment; the former is more or less a no-op for now.
* CUDA memory allocation and data movement are moved to the CUDAContext class, which keeps track of its allocations per device. For now this tracking has no further effect, but it can be used to reuse allocations in the future.
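
A minimal usage sketch of the new mechanism from a CUDA kernel's point of view. This is illustrative only: the kernel function, variable names, and the exact create()/getValues() overloads are assumptions, not part of this change.

#include <runtime/local/context/DaphneContext.h>
#include <runtime/local/datastructures/DataObjectFactory.h>
#include <runtime/local/datastructures/DenseMatrix.h>
#include <runtime/local/datastructures/AllocationDescriptorCUDA.h>

// Sketch: a CUDA kernel obtaining device-resident buffers via an allocation descriptor.
template<typename VT>
void someCUDAKernel(DenseMatrix<VT>*& res, const DenseMatrix<VT>* arg, DCTX(dctx)) {
    AllocationDescriptorCUDA alloc_desc(dctx, 0);      // descriptor for CUDA device 0
    if(res == nullptr)                                 // allocate the result directly on the GPU
        res = DataObjectFactory::create<DenseMatrix<VT>>(arg->getNumRows(), arg->getNumCols(), false, &alloc_desc);
    const VT* d_arg = arg->getValues(&alloc_desc);     // transfers host data to the device if needed
    VT* d_res = res->getValues(&alloc_desc);
    // ... launch CUDA work on d_arg / d_res ...
}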

Closes daphne-eu#191, Closes daphne-eu#334
corepointer authored and aristotelis96 committed Jun 6, 2022
1 parent 40ca604 commit f02fa9c
Showing 45 changed files with 657 additions and 441 deletions.
23 changes: 20 additions & 3 deletions src/runtime/local/context/CUDAContext.cpp
@@ -16,6 +16,8 @@

#include "runtime/local/context/CUDAContext.h"

size_t CUDAContext::alloc_count = 0;

void CUDAContext::destroy() {
#ifndef NDEBUG
std::cout << "Destroying CUDA context..." << std::endl;
@@ -110,9 +112,7 @@ void* CUDAContext::getCUDNNWorkspace(size_t size) {
}

std::unique_ptr<IContext> CUDAContext::createCudaContext(int device_id) {
//#ifndef NDEBUG
// std::cout << "creating CUDA context..." << std::endl;
//#endif

int device_count = -1;
CHECK_CUDART(cudaGetDeviceCount(&device_count));

@@ -131,3 +131,20 @@ std::unique_ptr<IContext> CUDAContext::createCudaContext(int device_id) {
return ctx;
}

std::shared_ptr<std::byte> CUDAContext::malloc(size_t size, bool zero, size_t& id) {
id = alloc_count++;
std::byte* dev_ptr;
CHECK_CUDART(cudaMalloc(reinterpret_cast<void **>(&dev_ptr), size));
allocations.emplace(id, std::shared_ptr<std::byte>(dev_ptr, CudaDeleter<std::byte>()));

if(zero)
CHECK_CUDART(cudaMemset(dev_ptr, 0, size));
return allocations.at(id);
}

void CUDAContext::free(size_t id) {
// ToDo: handle reuse
CHECK_CUDART(cudaFree(allocations.at(id).get()));
allocations.erase(id);
}

30 changes: 15 additions & 15 deletions src/runtime/local/context/CUDAContext.h
@@ -16,14 +16,14 @@

#pragma once

//#include <api/cli/DaphneUserConfig.h>
#include "IContext.h"
#include <runtime/local/kernels/CUDA/HostUtils.h>
//#include <cublasLt.h>
#include "runtime/local/context/DaphneContext.h"
#include "runtime/local/kernels/CUDA/HostUtils.h"

#include <cassert>
#include <iostream>
#include <memory>
#include <map>

class CUDAContext : public IContext {
int device_id = -1;
@@ -44,13 +44,13 @@ class CUDAContext : public IContext {
// preallocate 64MB
size_t cudnn_workspace_size{};
void* cudnn_workspace{};

// cublasLt API
// cublasLtHandle_t cublaslt_Handle = nullptr;
// void* cublas_workspace{};
// size_t cublas_workspace_size{};


std::map<size_t, std::shared_ptr<std::byte>> allocations;
static size_t alloc_count;
explicit CUDAContext(int id) : device_id(id) { }

void init();

public:
CUDAContext() = delete;
CUDAContext(const CUDAContext&) = delete;
@@ -63,9 +63,6 @@
[[nodiscard]] cublasHandle_t getCublasHandle() const { return cublas_handle; }
[[nodiscard]] cusparseHandle_t getCusparseHandle() const { return cusparse_handle; }

// [[nodiscard]] cublasLtHandle_t getCublasLtHandle() const { return cublaslt_Handle; }
// [[nodiscard]] void* getCublasWorkspacePtr() const { return cublas_workspace; }
// [[nodiscard]] size_t getCublasWorkspaceSize() const { return cublas_workspace_size; }
[[nodiscard]] const cudaDeviceProp* getDeviceProperties() const { return &device_properties; }
[[nodiscard]] cudnnHandle_t getCUDNNHandle() const { return cudnn_handle; }
[[nodiscard]] cusolverDnHandle_t getCUSOLVERHandle() const { return cusolver_handle; }
@@ -79,7 +76,7 @@ class CUDAContext : public IContext {

void* getCUDNNWorkspace(size_t size);

size_t getMemBudget() { return mem_budget; }
[[nodiscard]] size_t getMemBudget() const { return mem_budget; }

int conv_algorithm = -1;
cudnnPoolingDescriptor_t pooling_desc{};
@@ -91,7 +88,10 @@
cudnnFilterDescriptor_t filterDesc{};
cudnnBatchNormMode_t bn_mode = CUDNN_BATCHNORM_SPATIAL;

static CUDAContext* get(DaphneContext* ctx, size_t id) { return dynamic_cast<CUDAContext*>(ctx->getCUDAContext(id)); }

private:
void init();

std::shared_ptr<std::byte> malloc(size_t size, bool zero, size_t& id);
void free(size_t id);

};
13 changes: 2 additions & 11 deletions src/runtime/local/context/DaphneContext.h
@@ -14,9 +14,6 @@
* limitations under the License.
*/

#ifndef SRC_RUNTIME_LOCAL_CONTEXT_DAPHNECONTEXT_H
#define SRC_RUNTIME_LOCAL_CONTEXT_DAPHNECONTEXT_H

#pragma once

#include <api/cli/DaphneUserConfig.h>
@@ -27,10 +24,6 @@

#include "IContext.h"

#ifdef USE_CUDA
#include "CUDAContext.h"
#endif

// This macro is intended to be used in kernel function signatures, such that
// we can change the ubiquitous DaphneContext parameter in a single place, if
// required.
@@ -77,14 +70,12 @@ struct DaphneContext {

#ifdef USE_CUDA
// ToDo: in a multi device setting this should use a find call instead of a direct [] access
[[nodiscard]] CUDAContext* getCUDAContext(int dev_id) const {
return dynamic_cast<CUDAContext*>(cuda_contexts[dev_id].get());
[[nodiscard]] IContext* getCUDAContext(size_t dev_id) const {
return cuda_contexts[dev_id].get();
}
#endif

[[nodiscard]] bool useCUDA() const { return !cuda_contexts.empty(); }

[[maybe_unused]] [[nodiscard]] DaphneUserConfig getUserConfig() const { return config; }
};

#endif //SRC_RUNTIME_LOCAL_CONTEXT_DAPHNECONTEXT_H
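
Note that getCUDAContext() now returns the untyped IContext*; the downcast to the concrete type moves into the static CUDAContext::get() shown in CUDAContext.h above. A one-line sketch of the resulting call pattern, assuming dctx is a valid DaphneContext* with an initialized CUDA context:

auto* cctx = CUDAContext::get(dctx, 0);   // typed access to the CUDA context of device 0
size_t budget = cctx->getMemBudget();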
5 changes: 0 additions & 5 deletions src/runtime/local/context/IContext.h
@@ -1,11 +1,6 @@
#ifndef DAPHNE_PROTOTYPE_ICONTEXT_H
#define DAPHNE_PROTOTYPE_ICONTEXT_H

#pragma once

class IContext {
public:
virtual void destroy() = 0;
};

#endif //DAPHNE_PROTOTYPE_ICONTEXT_H
69 changes: 69 additions & 0 deletions src/runtime/local/datastructures/AllocationDescriptorCUDA.h
@@ -0,0 +1,69 @@
/*
* Copyright 2022 The DAPHNE Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cstdint>
#include "ObjectMetaData.h"
#include "runtime/local/context/CUDAContext.h"

class AllocationDescriptorCUDA : public IAllocationDescriptor {
ALLOCATION_TYPE type = ALLOCATION_TYPE::GPU_CUDA;
uint32_t device_id{};
DaphneContext* dctx{};
std::shared_ptr<std::byte> data{};
size_t alloc_id{};

public:
AllocationDescriptorCUDA() = delete;

AllocationDescriptorCUDA(DaphneContext* ctx, uint32_t device_id) : device_id(device_id), dctx(ctx) { }

~AllocationDescriptorCUDA() override {
// ToDo: for now we free if this is the last context-external ref to the buffer
if(data.use_count() == 2) {
CUDAContext::get(dctx, device_id)->free(alloc_id);
}
}

[[nodiscard]] ALLOCATION_TYPE getType() const override { return type; }

[[nodiscard]] uint32_t getLocation() const { return device_id; }

void createAllocation(size_t size, bool zero) override {
auto ctx = CUDAContext::get(dctx, device_id);
data = ctx->malloc(size, zero, alloc_id);
}

std::shared_ptr<std::byte> getData() override { return data; }

[[nodiscard]] std::unique_ptr<IAllocationDescriptor> clone() const override {
return std::make_unique<AllocationDescriptorCUDA>(*this);
}

void transferTo(std::byte* src, size_t size) override {
CHECK_CUDART(cudaMemcpy(data.get(), src, size, cudaMemcpyHostToDevice));
}
void transferFrom(std::byte* dst, size_t size) override {
CHECK_CUDART(cudaMemcpy(dst, data.get(), size, cudaMemcpyDeviceToHost));
};

bool operator==(const IAllocationDescriptor* other) const override {
if(getType() == other->getType())
return(getLocation() == dynamic_cast<const AllocationDescriptorCUDA *>(other)->getLocation());
return false;
}
};
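
Exercised in isolation, the descriptor can be used roughly as follows. This is a sketch assuming a valid DaphneContext* with an initialized CUDA context; in practice DenseMatrix drives these calls through the object metadata machinery rather than user code.

#include <cstddef>
#include <vector>
#include <runtime/local/datastructures/AllocationDescriptorCUDA.h>

// Sketch: manual round-trip of a host buffer through a CUDA allocation descriptor.
void roundTripSketch(DaphneContext* dctx) {
    std::vector<float> host(1024, 1.0f);
    const size_t num_bytes = host.size() * sizeof(float);
    AllocationDescriptorCUDA desc(dctx, 0);                                   // CUDA device 0
    desc.createAllocation(num_bytes, false);                                  // cudaMalloc via CUDAContext::malloc
    desc.transferTo(reinterpret_cast<std::byte*>(host.data()), num_bytes);    // host -> device
    // ... run device code on desc.getData().get() ...
    desc.transferFrom(reinterpret_cast<std::byte*>(host.data()), num_bytes);  // device -> host
}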
34 changes: 34 additions & 0 deletions src/runtime/local/datastructures/AllocationDescriptorHost.h
@@ -0,0 +1,34 @@
/*
* Copyright 2022 The DAPHNE Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include "ObjectMetaData.h"
#include <cstdint>

class AllocationDescriptorHost : public IAllocationDescriptor {
public:
~AllocationDescriptorHost() override = default;
[[nodiscard]] ALLOCATION_TYPE getType() const override { return ALLOCATION_TYPE::HOST; }
void createAllocation(size_t size, bool zero) override { }
std::shared_ptr<std::byte> getData() override { return nullptr; }
void transferTo(std::byte* src, size_t size) override { }
void transferFrom(std::byte* dst, size_t size) override {}
[[nodiscard]] std::unique_ptr<IAllocationDescriptor> clone() const override {
return std::make_unique<AllocationDescriptorHost>(*this);
}
bool operator==(const IAllocationDescriptor* other) const override { return (getType() == other->getType()); }
};
6 changes: 5 additions & 1 deletion src/runtime/local/datastructures/CMakeLists.txt
@@ -15,7 +15,11 @@
add_library(DataStructures
Frame.cpp
ValueTypeUtils.cpp
DenseMatrix.cpp)
DenseMatrix.cpp
ObjectMetaData.h
AllocationDescriptorHost.h
AllocationDescriptorCUDA.h
ObjectMetaData.cpp)

if(USE_CUDA AND CMAKE_CUDA_COMPILER)
target_include_directories(DataStructures PUBLIC ${CUDAToolkit_INCLUDE_DIRS})