error: ‘class at::Context’ has no member named ‘getCurrentCUDAStream’ #98

Merged 1 commit on Apr 30, 2019
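The error comes from building these extensions against a newer (1.0-era) PyTorch, where the stream getter moved off at::Context: the old member at::globalContext().getCurrentCUDAStream() is gone, and its replacement is the free function at::cuda::getCurrentCUDAStream() declared in ATen/cuda/CUDAContext.h. A minimal sketch of the updated launch pattern follows; it assumes a PyTorch 1.0-era toolchain, and the fill_ones kernel and launch_on_current_stream wrapper are illustrative names, not code from this repository.

// Sketch only: the same stream-lookup pattern as the diffs below,
// reduced to a self-contained .cu example.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>

#define CUDA_NUM_THREADS 512

__global__ void fill_ones(float* data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = 1.0f;
}

void launch_on_current_stream(at::Tensor input) {
    int n = input.numel();
    // Old API (no longer compiles): at::globalContext().getCurrentCUDAStream()
    // New API: the returned at::cuda::CUDAStream converts implicitly to cudaStream_t.
    fill_ones<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
        input.data<float>(), n);
}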
10 changes: 10 additions & 0 deletions networks/channelnorm_package/channelnorm_cuda.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: channelnorm-cuda
+Version: 0.0.0
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
7 changes: 7 additions & 0 deletions networks/channelnorm_package/channelnorm_cuda.egg-info/SOURCES.txt
@@ -0,0 +1,7 @@
+channelnorm_cuda.cc
+channelnorm_kernel.cu
+setup.py
+channelnorm_cuda.egg-info/PKG-INFO
+channelnorm_cuda.egg-info/SOURCES.txt
+channelnorm_cuda.egg-info/dependency_links.txt
+channelnorm_cuda.egg-info/top_level.txt
1 change: 1 addition & 0 deletions networks/channelnorm_package/channelnorm_cuda.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
1 change: 1 addition & 0 deletions networks/channelnorm_package/channelnorm_cuda.egg-info/top_level.txt
@@ -0,0 +1 @@
+channelnorm_cuda
7 changes: 5 additions & 2 deletions networks/channelnorm_package/channelnorm_kernel.cu
@@ -1,5 +1,6 @@
 #include <ATen/ATen.h>
 #include <ATen/Context.h>
+#include <ATen/cuda/CUDAContext.h>
 
 #include "channelnorm_kernel.cuh"
 
@@ -109,7 +110,8 @@ void channelnorm_kernel_forward(
 
 AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "channelnorm_forward", ([&] {
 
-kernel_channelnorm_update_output<scalar_t><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::globalContext().getCurrentCUDAStream() >>>(
+kernel_channelnorm_update_output<scalar_t><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
+//at::globalContext().getCurrentCUDAStream() >>>(
 n,
 input1.data<scalar_t>(),
 input1_size,
@@ -149,7 +151,8 @@ void channelnorm_kernel_backward(
 
 AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "channelnorm_backward_input1", ([&] {
 
-kernel_channelnorm_backward_input1<scalar_t><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::globalContext().getCurrentCUDAStream() >>>(
+kernel_channelnorm_backward_input1<scalar_t><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
+//at::globalContext().getCurrentCUDAStream() >>>(
 n,
 input1.data<scalar_t>(),
 input1_size,
8 changes: 6 additions & 2 deletions networks/correlation_package/correlation_cuda.cc
@@ -1,5 +1,7 @@
 #include <torch/torch.h>
 #include <ATen/ATen.h>
+#include <ATen/Context.h>
+#include <ATen/cuda/CUDAContext.h>
 #include <stdio.h>
 #include <iostream>
 
@@ -71,7 +73,8 @@ int correlation_forward_cuda(at::Tensor& input1, at::Tensor& input2, at::Tensor&
 stride1,
 stride2,
 corr_type_multiply,
-at::globalContext().getCurrentCUDAStream()
+at::cuda::getCurrentCUDAStream()
+//at::globalContext().getCurrentCUDAStream()
 );
 
 //check for errors
@@ -152,7 +155,8 @@ int correlation_backward_cuda(at::Tensor& input1, at::Tensor& input2, at::Tensor
 stride1,
 stride2,
 corr_type_multiply,
-at::globalContext().getCurrentCUDAStream()
+at::cuda::getCurrentCUDAStream()
+//at::globalContext().getCurrentCUDAStream()
 );
 
 if (!success) {
10 changes: 10 additions & 0 deletions networks/correlation_package/correlation_cuda.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: correlation-cuda
+Version: 0.0.0
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
7 changes: 7 additions & 0 deletions networks/correlation_package/correlation_cuda.egg-info/SOURCES.txt
@@ -0,0 +1,7 @@
+correlation_cuda.cc
+correlation_cuda_kernel.cu
+setup.py
+correlation_cuda.egg-info/PKG-INFO
+correlation_cuda.egg-info/SOURCES.txt
+correlation_cuda.egg-info/dependency_links.txt
+correlation_cuda.egg-info/top_level.txt
1 change: 1 addition & 0 deletions networks/correlation_package/correlation_cuda.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
1 change: 1 addition & 0 deletions networks/correlation_package/correlation_cuda.egg-info/top_level.txt
@@ -0,0 +1 @@
+correlation_cuda
10 changes: 10 additions & 0 deletions networks/resample2d_package/resample2d_cuda.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: resample2d-cuda
+Version: 0.0.0
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
7 changes: 7 additions & 0 deletions networks/resample2d_package/resample2d_cuda.egg-info/SOURCES.txt
@@ -0,0 +1,7 @@
+resample2d_cuda.cc
+resample2d_kernel.cu
+setup.py
+resample2d_cuda.egg-info/PKG-INFO
+resample2d_cuda.egg-info/SOURCES.txt
+resample2d_cuda.egg-info/dependency_links.txt
+resample2d_cuda.egg-info/top_level.txt
1 change: 1 addition & 0 deletions networks/resample2d_package/resample2d_cuda.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
1 change: 1 addition & 0 deletions networks/resample2d_package/resample2d_cuda.egg-info/top_level.txt
@@ -0,0 +1 @@
+resample2d_cuda
10 changes: 7 additions & 3 deletions networks/resample2d_package/resample2d_kernel.cu
@@ -1,5 +1,6 @@
 #include <ATen/ATen.h>
 #include <ATen/Context.h>
+#include <ATen/cuda/CUDAContext.h>
 
 #define CUDA_NUM_THREADS 512
 #define THREADS_PER_BLOCK 64
@@ -208,7 +209,8 @@ void resample2d_kernel_forward(
 // TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF
 // AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] {
 
-kernel_resample2d_update_output<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::globalContext().getCurrentCUDAStream() >>>(
+kernel_resample2d_update_output<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
+//at::globalContext().getCurrentCUDAStream() >>>(
 n,
 input1.data<float>(),
 input1_size,
@@ -253,7 +255,8 @@ void resample2d_kernel_backward(
 
 // AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] {
 
-kernel_resample2d_backward_input1<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::globalContext().getCurrentCUDAStream() >>>(
+kernel_resample2d_backward_input1<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
+//at::globalContext().getCurrentCUDAStream() >>>(
 n,
 input1.data<float>(),
 input1_size,
Expand All @@ -280,7 +283,8 @@ void resample2d_kernel_backward(
// AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] {


kernel_resample2d_backward_input2<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::globalContext().getCurrentCUDAStream() >>>(
kernel_resample2d_backward_input2<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
//at::globalContext().getCurrentCUDAStream() >>>(
n,
input1.data<float>(),
input1_size,
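After this change the three packages need to be rebuilt so the compiled extensions pick up the new symbol. Assuming each *_package directory keeps the standard setuptools layout shown in its SOURCES.txt, running python setup.py install inside networks/channelnorm_package, networks/correlation_package, and networks/resample2d_package against a CUDA-enabled PyTorch recompiles the .cu sources with the new ATen/cuda/CUDAContext.h include.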