refactor/collenchyma: remove Collenchyma CUDA memory
remove Collenchyma CUDA memory from ConvolutionConfig

CLOSES #16

BREAKING CHANGE: removes the workspace buffers from the ConvolutionConfig,
changing its constructor; an additional workspace pointer now has to be
passed to the convolution functions.
hobofan committed Feb 29, 2016
1 parent 0dd0174 commit d1e106c
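
For downstream code, the breaking change boils down to ConvolutionConfig::new losing its three Memory arguments (the new workspace pointer on the convolution calls is shown after the cudnn.rs diff below). A rough sketch assembled from the cudnn.rs hunk, with the surrounding algorithm selection unchanged:

// Before: the config owned three Collenchyma CUDA workspace buffers.
let config = ConvolutionConfig::new(
    algos_fwd[0].algo, Memory::new(workspace_size_fwd).unwrap(), workspace_size_fwd,
    algos_filter_bwd[0].algo, Memory::new(workspace_filter_size_bwd).unwrap(), workspace_filter_size_bwd,
    algos_data_bwd[0].algo, Memory::new(workspace_data_size_bwd).unwrap(), workspace_data_size_bwd,
    conv_desc, filter_desc,
);

// After: only the algorithms, their workspace sizes and the descriptors remain;
// the workspace itself is now allocated and owned by the caller.
let config = ConvolutionConfig::new(
    algos_fwd[0].algo, workspace_size_fwd,
    algos_filter_bwd[0].algo, workspace_filter_size_bwd,
    algos_data_bwd[0].algo, workspace_data_size_bwd,
    conv_desc, filter_desc,
);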
Showing 4 changed files with 22 additions and 34 deletions.
cudnn/Cargo.toml: 1 change (0 additions, 1 deletion)
@@ -15,7 +15,6 @@ license = "MIT OR Apache-2.0"
 
 [dependencies]
 libc = "0.2"
-collenchyma = "0.0.8"
 cudnn-sys = { version = "0.0.2", path = "../cudnn-sys" }
 
 clippy = { version = "0.0.27", optional = true }
cudnn/src/cudnn.rs: 16 changes (9 additions, 7 deletions)
@@ -6,7 +6,6 @@
 
 use super::*;
 use super::utils::{ConvolutionConfig, NormalizationConfig, PoolingConfig, ScalParams};
-use co::frameworks::cuda::Memory;
 
 #[derive(Debug, Clone)]
 /// Provides a the high-level interface to CUDA's cuDNN.
@@ -68,9 +67,9 @@ impl Cudnn {
 
         Ok(
             ConvolutionConfig::new(
-                algos_fwd[0].algo, Memory::new(workspace_size_fwd).unwrap(), workspace_size_fwd,
-                algos_filter_bwd[0].algo, Memory::new(workspace_filter_size_bwd).unwrap(), workspace_filter_size_bwd,
-                algos_data_bwd[0].algo, Memory::new(workspace_data_size_bwd).unwrap(), workspace_data_size_bwd,
+                algos_fwd[0].algo, workspace_size_fwd,
+                algos_filter_bwd[0].algo, workspace_filter_size_bwd,
+                algos_data_bwd[0].algo, workspace_data_size_bwd,
                 conv_desc, filter_desc
             )
         )
@@ -232,6 +231,7 @@ impl Cudnn {
     pub fn convolution_forward<T>(
         &self,
         conv_config: &ConvolutionConfig,
+        workspace: *mut ::libc::c_void,
         filter_data: *const ::libc::c_void,
         src_desc: &TensorDescriptor,
         src_data: *const ::libc::c_void,
@@ -241,7 +241,7 @@
     ) -> Result<(), Error> {
         API::convolution_forward(
             *self.id_c(),
-            *conv_config.forward_algo(), *conv_config.conv_desc().id_c(), *conv_config.forward_workspace().id_c() as *mut ::libc::c_void, *conv_config.forward_workspace_size(),
+            *conv_config.forward_algo(), *conv_config.conv_desc().id_c(), workspace, *conv_config.forward_workspace_size(),
             scale.a, *src_desc.id_c(), src_data, *conv_config.filter_desc().id_c(), filter_data,
             scale.b, *dest_desc.id_c(), dest_data
         )
@@ -271,6 +271,7 @@ impl Cudnn {
     pub fn convolution_backward_filter<T>(
         &self,
         conv_config: &ConvolutionConfig,
+        workspace: *mut ::libc::c_void,
         src_desc: &TensorDescriptor,
         src_data: *const ::libc::c_void,
         dest_grad_desc: &TensorDescriptor,
@@ -280,7 +281,7 @@
     ) -> Result<(), Error> {
         API::convolution_backward_filter(
             *self.id_c(),
-            *conv_config.backward_filter_algo(), *conv_config.conv_desc().id_c(), *conv_config.backward_filter_workspace().id_c() as *mut ::libc::c_void, *conv_config.backward_filter_workspace_size(),
+            *conv_config.backward_filter_algo(), *conv_config.conv_desc().id_c(), workspace, *conv_config.backward_filter_workspace_size(),
             scale.a, *src_desc.id_c(), src_data, *dest_grad_desc.id_c(), dest_grad_data,
             scale.b, *conv_config.filter_desc().id_c(), filter_data
         )
@@ -292,6 +293,7 @@ impl Cudnn {
     pub fn convolution_backward_data<T>(
         &self,
         conv_config: &ConvolutionConfig,
+        workspace: *mut ::libc::c_void,
         filter_data: *const ::libc::c_void,
         dest_grad_desc: &TensorDescriptor,
         dest_grad_data: *const ::libc::c_void,
@@ -301,7 +303,7 @@
     ) -> Result<(), Error> {
         API::convolution_backward_data(
             *self.id_c(),
-            *conv_config.backward_data_algo(), *conv_config.conv_desc().id_c(), *conv_config.backward_data_workspace().id_c() as *mut ::libc::c_void, *conv_config.backward_data_workspace_size(),
+            *conv_config.backward_data_algo(), *conv_config.conv_desc().id_c(), workspace, *conv_config.backward_data_workspace_size(),
             scale.a, *conv_config.filter_desc().id_c(), filter_data, *dest_grad_desc.id_c(), dest_grad_data,
             scale.b, *src_grad_desc.id_c(), src_grad_data
         )
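Since the workspace no longer lives in the config, every convolution call now takes a raw device pointer to caller-owned scratch memory. A minimal caller-side sketch of the new convolution_forward signature; the destination and scale parameters are hidden in the collapsed part of the diff above and are assumed here, as are the public module paths:

extern crate cudnn;
extern crate libc;

use cudnn::{Cudnn, Error, TensorDescriptor};
use cudnn::utils::{ConvolutionConfig, ScalParams};

/// Hypothetical wrapper: the caller owns the workspace and hands its raw
/// pointer to cuDNN together with the (now buffer-free) config.
fn forward<T>(
    cudnn: &Cudnn,
    conv_config: &ConvolutionConfig,
    workspace: *mut libc::c_void,      // caller-allocated GPU scratch memory
    filter_data: *const libc::c_void,
    src_desc: &TensorDescriptor,
    src_data: *const libc::c_void,
    dest_desc: &TensorDescriptor,
    dest_data: *mut libc::c_void,
    scale: ScalParams<T>,
) -> Result<(), Error> {
    cudnn.convolution_forward::<T>(
        conv_config,
        workspace,                     // new argument introduced by this commit
        filter_data,
        src_desc,
        src_data,
        dest_desc,
        dest_data,
        scale,
    )
}

The backward_filter and backward_data calls change in the same way.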
cudnn/src/lib.rs: 1 change (0 additions, 1 deletion)
@@ -66,7 +66,6 @@
 
 extern crate libc;
 extern crate cudnn_sys as ffi;
-extern crate collenchyma as co;
 
 pub use ffi::*;
 pub use self::cudnn::Cudnn;
cudnn/src/utils.rs: 38 changes (13 additions, 25 deletions)
@@ -2,7 +2,6 @@
 use super::{ConvolutionDescriptor, NormalizationDescriptor, FilterDescriptor, PoolingDescriptor};
 use ffi::*;
 use std::marker::PhantomData;
-use co::frameworks::cuda::Memory;
 
 #[derive(Debug, Copy, Clone)]
 /// Defines the available data types for the CUDA cuDNN data representation.
@@ -24,11 +23,8 @@ pub struct ConvolutionConfig {
     forward_algo: cudnnConvolutionFwdAlgo_t,
     backward_filter_algo: cudnnConvolutionBwdFilterAlgo_t,
     backward_data_algo: cudnnConvolutionBwdDataAlgo_t,
-    forward_workspace: Memory,
     forward_workspace_size: usize,
-    backward_filter_workspace: Memory,
     backward_filter_workspace_size: usize,
-    backward_data_workspace: Memory,
     backward_data_workspace_size: usize,
     conv_desc: ConvolutionDescriptor,
     filter_desc: FilterDescriptor,
@@ -38,42 +34,44 @@ impl ConvolutionConfig {
     /// Returns a new ConvolutionConfig
     pub fn new(
         algo_fwd: cudnnConvolutionFwdAlgo_t,
-        workspace_fwd: Memory,
         workspace_size_fwd: usize,
         algo_filter_bwd: cudnnConvolutionBwdFilterAlgo_t,
-        workspace_filter_bwd: Memory,
         workspace_filter_size_bwd: usize,
         algo_data_bwd: cudnnConvolutionBwdDataAlgo_t,
-        workspace_data_bwd: Memory,
         workspace_data_size_bwd: usize,
         conv_desc: ConvolutionDescriptor,
         filter_desc: FilterDescriptor,
     ) -> ConvolutionConfig {
         ConvolutionConfig {
             forward_algo: algo_fwd,
-            forward_workspace: workspace_fwd,
             forward_workspace_size: workspace_size_fwd,
             backward_filter_algo: algo_filter_bwd,
-            backward_filter_workspace: workspace_filter_bwd,
             backward_filter_workspace_size: workspace_filter_size_bwd,
             backward_data_algo: algo_data_bwd,
-            backward_data_workspace: workspace_data_bwd,
             backward_data_workspace_size: workspace_data_size_bwd,
             conv_desc: conv_desc,
             filter_desc: filter_desc,
         }
     }
 
+    /// Returns the largest workspace size out of the three.
+    ///
+    /// Useful for creating a shared workspace.
+    pub fn largest_workspace_size(&self) -> &usize {
+        if self.backward_data_workspace_size() >= self.backward_filter_workspace_size() && self.backward_data_workspace_size() >= self.forward_workspace_size() {
+            self.backward_data_workspace_size()
+        } else if self.backward_filter_workspace_size() >= self.backward_data_workspace_size() && self.backward_filter_workspace_size() >= self.forward_workspace_size() {
+            self.backward_filter_workspace_size()
+        } else {
+            self.forward_workspace_size()
+        }
+    }
+
     /// Returns `forward_algo`.
     pub fn forward_algo(&self) -> &cudnnConvolutionFwdAlgo_t {
         &self.forward_algo
     }
 
-    /// Returns `forward_workspace`.
-    pub fn forward_workspace(&self) -> &Memory {
-        &self.forward_workspace
-    }
-
     /// Returns `forward_workspace_size`.
     pub fn forward_workspace_size(&self) -> &usize {
         &self.forward_workspace_size
@@ -84,11 +82,6 @@ impl ConvolutionConfig {
         &self.backward_filter_algo
     }
 
-    /// Returns `backward_filter_workspace`.
-    pub fn backward_filter_workspace(&self) -> &Memory {
-        &self.backward_filter_workspace
-    }
-
     /// Returns `backward_filter_workspace_size`.
     pub fn backward_filter_workspace_size(&self) -> &usize {
         &self.backward_filter_workspace_size
@@ -99,11 +92,6 @@ impl ConvolutionConfig {
         &self.backward_data_algo
     }
 
-    /// Returns `backward_data_workspace`.
-    pub fn backward_data_workspace(&self) -> &Memory {
-        &self.backward_data_workspace
-    }
-
     /// Returns `backward_data_workspace_size`.
     pub fn backward_data_workspace_size(&self) -> &usize {
         &self.backward_data_workspace_size
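The largest_workspace_size helper added to utils.rs above exists so that one buffer can serve all three algorithms. A minimal sketch of that shared-workspace pattern, assuming the caller keeps using Collenchyma's CUDA Memory for the allocation (as the removed code did internally) and that its id_c() handle casts to a raw pointer the same way the old call sites did:

extern crate collenchyma as co;
extern crate cudnn;
extern crate libc;

use co::frameworks::cuda::Memory;
use cudnn::utils::ConvolutionConfig;

/// Hypothetical helper: one CUDA allocation sized for the worst case of the
/// forward, backward-filter and backward-data algorithms. The returned Memory
/// must outlive every use of the raw pointer derived from it.
fn alloc_shared_workspace(conv_config: &ConvolutionConfig) -> (Memory, *mut libc::c_void) {
    let workspace = Memory::new(*conv_config.largest_workspace_size()).unwrap();
    let ptr = *workspace.id_c() as *mut libc::c_void;
    (workspace, ptr)
}

The same pointer can then be passed to convolution_forward, convolution_backward_filter and convolution_backward_data, so the three per-pass buffers the config used to own collapse into one caller-managed allocation.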
