Skip to content

Commit

Permalink
path
Browse files Browse the repository at this point in the history
  • Loading branch information
zhanghang1989 committed Nov 20, 2017
1 parent c92a7c2 commit 0f6efd8
Show file tree
Hide file tree
Showing 17 changed files with 279 additions and 339 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,5 +6,8 @@ build/
data/
docs/src/
docs/html/
encoding/lib/
encoding/_ext/
encoding.egg-info/
experiments/recognition/
experiments/segmentation/
4 changes: 2 additions & 2 deletions LICENSE
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
Expand Down
9 changes: 6 additions & 3 deletions build.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

lib_path = os.path.join(os.path.dirname(torch.__file__), 'lib')
cwd = os.path.dirname(os.path.realpath(__file__))
encoding_lib_path = os.path.join(cwd, "encoding", "lib")

# clean the build files
clean_cmd = ['bash', 'clean.sh']
Expand All @@ -26,11 +27,12 @@
if platform.system() == 'Darwin':
os.environ['TH_LIBRARIES'] = os.path.join(lib_path,'libTH.1.dylib')
os.environ['THC_LIBRARIES'] = os.path.join(lib_path,'libTHC.1.dylib')
ENCODING_LIB = os.path.join(lib_path, 'libENCODING.dylib')
ENCODING_LIB = os.path.join(cwd, 'encoding/lib/libENCODING.dylib')

else:
os.environ['TH_LIBRARIES'] = os.path.join(lib_path,'libTH.so.1')
os.environ['THC_LIBRARIES'] = os.path.join(lib_path,'libTHC.so.1')
ENCODING_LIB = os.path.join(lib_path, 'libENCODING.so')
ENCODING_LIB = os.path.join(cwd, 'encoding/lib/libENCODING.so')

build_all_cmd = ['bash', 'encoding/make.sh']
subprocess.check_call(build_all_cmd, env=dict(os.environ))
Expand All @@ -44,7 +46,7 @@
with_cuda = True

include_path = [os.path.join(lib_path, 'include'),
os.path.join(lib_path,'include/ENCODING'),
os.path.join(cwd,'encoding/kernel'),
os.path.join(cwd,'encoding/kernel/include'),
os.path.join(cwd,'encoding/src/')]

Expand All @@ -65,6 +67,7 @@ def make_relative_rpath(path):
include_dirs = include_path,
extra_link_args = [
make_relative_rpath(lib_path),
make_relative_rpath(encoding_lib_path),
ENCODING_LIB,
],
)
Expand Down
2 changes: 1 addition & 1 deletion clean.sh
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
#!/usr/bin/env bash

rm -rf build/ dist/ encoding.egg-info/ encoding/build/ encoding/_ext/ __pycache__ encoding/__pycache__
rm -rf build/ dist/ encoding.egg-info/ encoding/lib/ encoding/_ext/ __pycache__ encoding/__pycache__
8 changes: 5 additions & 3 deletions docs/source/dilated.rst
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,13 @@
Dilated Networks
================

We provide correct dilated pre-trained ResNet and DenseNet for semantic segmentation.
For dilation of ResNet, we replace the stride-2 Conv3x3 at the beginning of a given stage and update the dilation of the subsequent conv layers accordingly.
For dilation of DenseNet, we provide :class:`encoding.nn.DilatedAvgPool2d` that handles the dilation of the transition layers, then update the dilation of the conv layers afterwards.
We provide correct dilated pre-trained ResNet and DenseNet (stride of 8) for semantic segmentation.
For dilation of DenseNet, we provide :class:`encoding.nn.DilatedAvgPool2d`.
All provided models have been verified.

.. note::

This code is provided together with the paper (coming soon), please cite our work.

.. automodule:: encoding.dilated
.. currentmodule:: encoding.dilated
Expand Down
4 changes: 1 addition & 3 deletions docs/source/notes/compile.rst
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,7 @@ Installing PyTorch-Encoding
Install from Source
-------------------

* Please follow the `PyTorch instructions <https://github.com/pytorch/pytorch#from-source>`_ to install PyTorch from Source to the ``$HOME`` directory (recommended). Or you can simply clone a copy to ``$HOME`` directory::

git clone https://github.com/pytorch/pytorch $HOME/pytorch
* Install PyTorch from Source (recommended). Please follow the `PyTorch instructions <https://github.com/pytorch/pytorch#from-source>`_.

* Install this package

Expand Down
4 changes: 2 additions & 2 deletions docs/source/utils.rst
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,10 @@ Useful util functions.
.. automodule:: encoding.utils
.. currentmodule:: encoding.utils

:hidden:`CosLR_Scheduler`
:hidden:`LR_Scheduler`
~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: CosLR_Scheduler
.. autoclass:: LR_Scheduler
:members:

:hidden:`get_optimizer`
Expand Down
7 changes: 1 addition & 6 deletions encoding/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -75,9 +75,4 @@ IF(ENCODING_SO_VERSION)
SOVERSION ${ENCODING_SO_VERSION})
ENDIF(ENCODING_SO_VERSION)

FILE(GLOB kernel-header kernel/generic/*.h)
FILE(GLOB src-header src/generic/*.h)

INSTALL(TARGETS ENCODING LIBRARY DESTINATION ${ENCODING_INSTALL_LIB_SUBDIR})
INSTALL(FILES kernel/thc_encoding.h DESTINATION "${ENCODING_INSTALL_INCLUDE_SUBDIR}/ENCODING")
INSTALL(FILES ${src-header} ${kernel-header} DESTINATION "${ENCODING_INSTALL_INCLUDE_SUBDIR}/ENCODING/generic")
#INSTALL(TARGETS ENCODING LIBRARY DESTINATION ${ENCODING_INSTALL_LIB_SUBDIR})
59 changes: 9 additions & 50 deletions encoding/functions/syncbn.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,11 @@ def sum_square(input):
return _sum_square()(input)


class _batchnormtrain(Function):
class _batchnorm(Function):
def __init__(self, training=False):
super(_batchnorm, self).__init__()
self.training = training

def forward(ctx, input, gamma, beta, mean, std):
ctx.save_for_backward(input, gamma, beta, mean, std)
assert(input.dim()==3)
Expand Down Expand Up @@ -95,13 +99,13 @@ def backward(ctx, gradOutput):
encoding_lib.Encoding_Float_batchnorm_Backward(
gradOutput, input, gradInput, gradGamma, gradBeta,
mean, invstd, gamma, beta, gradMean, gradStd,
True)
self.training)
elif isinstance(input, torch.cuda.DoubleTensor):
with torch.cuda.device_of(input):
encoding_lib.Encoding_Double_batchnorm_Backward(
gradOutput, input, gradInput, gradGamma, gradBeta,
mean, invstd, gamma, beta, gradMean, gradStd,
True)
self.training)
else:
raise RuntimeError('Unimplemented data type!')
return gradInput, gradGamma, gradBeta, gradMean, gradStd
Expand All @@ -122,52 +126,7 @@ def batchnormtrain(input, gamma, beta, mean, std):
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
"""
return _batchnormtrain()(input, gamma, beta, mean, std)


class _batchnormeval(Function):
def forward(ctx, input, gamma, beta, mean, std):
ctx.save_for_backward(input, gamma, beta, mean, std)
assert(input.dim()==3)
with torch.cuda.device_of(input):
invstd = 1.0 / std
output = input.new().resize_as_(input)
if isinstance(input, torch.cuda.FloatTensor):
with torch.cuda.device_of(input):
encoding_lib.Encoding_Float_batchnorm_Forward(output,
input, mean, invstd, gamma, beta)
elif isinstance(input, torch.cuda.DoubleTensor):
with torch.cuda.device_of(input):
encoding_lib.Encoding_Double_batchnorm_Forward(output,
input, mean, invstd, gamma, beta)
else:
raise RuntimeError('Unimplemented data type!')
return output

def backward(ctx, gradOutput):
input, gamma, beta, mean, std = ctx.saved_tensors
invstd = 1.0 / std
with torch.cuda.device_of(input):
gradInput = gradOutput.new().resize_as_(input).zero_()
gradGamma = gradOutput.new().resize_as_(gamma).zero_()
gradBeta = gradOutput.new().resize_as_(beta).zero_()
gradMean = gradOutput.new().resize_as_(mean).zero_()
gradStd = gradOutput.new().resize_as_(std).zero_()
if isinstance(input, torch.cuda.FloatTensor):
with torch.cuda.device_of(input):
encoding_lib.Encoding_Float_batchnorm_Backward(
gradOutput, input, gradInput, gradGamma, gradBeta,
mean, invstd, gamma, beta, gradMean, gradStd,
False)
elif isinstance(input, torch.cuda.DoubleTensor):
with torch.cuda.device_of(input):
encoding_lib.Encoding_Double_batchnorm_Backward(
gradOutput, input, gradInput, gradGamma, gradBeta,
mean, invstd, gamma, beta, gradMean, gradStd,
False)
else:
raise RuntimeError('Unimplemented data type!')
return gradInput, gradGamma, gradBeta, gradMean, gradStd
return _batchnorm(True)(input, gamma, beta, mean, std)


def batchnormeval(input, gamma, beta, mean, std):
Expand All @@ -176,4 +135,4 @@ def batchnormeval(input, gamma, beta, mean, std):
Please see encoding.batchnormtrain_
"""
return _batchnormeval()(input, gamma, beta, mean, std)
return _batchnorm(False)(input, gamma, beta, mean, std)
5 changes: 2 additions & 3 deletions encoding/make.sh
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
#!/usr/bin/env bash

mkdir -p encoding/build && cd encoding/build
mkdir -p encoding/lib && cd encoding/lib
# compile and install
cmake ..
make install
cd ..
make
2 changes: 1 addition & 1 deletion encoding/nn/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

from .encoding import *
from .syncbn import *
from .basic import *
from .encoding import *
from .customize import *
Loading

1 comment on commit 0f6efd8

@zhanghang1989
Copy link
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Makes the compilation easier for users; solves the following issue:
#12

Please sign in to comment.