diff --git a/pkgs/development/libraries/science/math/tiny-cuda-nn/default.nix b/pkgs/development/libraries/science/math/tiny-cuda-nn/default.nix new file mode 100644 index 000000000000000..fee97ae38873ffa --- /dev/null +++ b/pkgs/development/libraries/science/math/tiny-cuda-nn/default.nix @@ -0,0 +1,171 @@ +{ + stdenv, + cmake, + fetchFromGitHub, + lib, + ninja, + symlinkJoin, + which, + pythonPackages, + pythonSupport ? false, +}: +assert lib.asserts.assertMsg +(pythonPackages.torch.cudaSupport) +"tiny-cuda-nn requires torch to be built with cudaSupport"; let + inherit (lib) lists strings; + inherit (pythonPackages.torch) cudaPackages cudaSupport; + inherit (cudaPackages) cudatoolkit cudaFlags; + + cuda-redist = symlinkJoin { + name = "cuda-redist"; + paths = with cudaPackages; [ + cuda_cudart + cuda_nvcc + libcublas + libcusparse + libcusolver + ]; + }; + + dotlessSemicolonCudaCapabilities = let + dropPTX = builtins.filter (c: ! strings.hasSuffix "PTX" c) cudaFlags.cudaCapabilities; + dropDot = lists.map (strings.replaceStrings ["."] [""]) dropPTX; + in + strings.concatStringsSep ";" (lists.unique dropDot); + + flags = [ + "TCNN_BUILD_BENCHMARK=OFF" + "TCNN_BUILD_EXAMPLES=OFF" + "TCNN_ALLOW_CUBLAS_CUSOLVER=ON" + "TCNN_CUDA_ARCHITECTURES=${dotlessSemicolonCudaCapabilities}" + ]; +in + stdenv.mkDerivation (finalAttrs: { + name = "tiny-cuda-nn"; + version = "1.6"; + + format = strings.optionalString pythonSupport "setuptools"; + + src = fetchFromGitHub { + owner = "NVlabs"; + repo = finalAttrs.name; + rev = "v${finalAttrs.version}"; + fetchSubmodules = true; + hash = "sha256-qW6Fk2GB71fvZSsfu+mykabSxEKvaikZ/pQQZUycOy0="; + }; + + nativeBuildInputs = + [ + which + cmake + ninja + ] + ++ lists.optionals pythonSupport (with pythonPackages; [ + pip + wheel + setuptools + ]); + + buildInputs = + [ + cuda-redist + ] + ++ lib.optionals pythonSupport ( + with pythonPackages; [ + python + pybind11 + ] + ); + + # TODO: OpenCV, whicih also provides python bindings and supports 
CUDA, adds the cuda + # redistributable to propagatedBuildInputs. Should we do this here as well? + propagatedBuildInputs = lib.optionals pythonSupport ( + with pythonPackages; [ + torch + ] + ); + + # NOTE: We cannot use pythonImportsCheck for this module because it uses torch to immediately + # initialize CUDA. We cannot assume that at the time we run the check phase, the user has an + # NVIDIA GPU available. + # There are no tests for the C++ library or the python bindings, so we just skip the check + # phase. + doCheck = false; + + preConfigure = '' + export CUDA_HOME=${cuda-redist} + export LIBRARY_PATH=${cuda-redist}/lib/stubs:$LIBRARY_PATH + export CC=${cudatoolkit.cc}/bin/cc + export CXX=${cudatoolkit.cc}/bin/c++ + export CUDAHOSTCXX=${cudatoolkit.cc}/bin/c++ + ''; + + # When building the python bindings, we cannot re-use the artifacts from the C++ build so we + # skip the CMake configurePhase and the buildPhase. + dontUseCmakeConfigure = pythonSupport; + cmakeFlags = lists.map (x: "-D${x}") flags; + + # The configurePhase usually puts you in the build directory, so for the python bindings we + # need to change directories to the source directory. 
+ configurePhase = strings.optionalString pythonSupport '' + runHook preConfigure + mkdir -p $NIX_BUILD_TOP/build + cd $NIX_BUILD_TOP/build + ${strings.concatStringsSep "\n" (lists.map (x: "export ${x}") flags)} + runHook postConfigure + ''; + + buildPhase = strings.optionalString pythonSupport '' + runHook preBuild + python -m pip wheel \ + --no-build-isolation \ + --no-clean \ + --no-deps \ + --no-index \ + --verbose \ + --wheel-dir $NIX_BUILD_TOP/build \ + $NIX_BUILD_TOP/source/bindings/torch + runHook postBuild + ''; + + installPhase = + '' + runHook preInstall + mkdir -p $out/lib + '' + # Installing the C++ library just requires copying the static library to the output directory + + strings.optionalString (!pythonSupport) '' + cp libtiny-cuda-nn.a $out/lib/ + '' + # Installing the python bindings requires building the wheel and installing it + + strings.optionalString pythonSupport '' + python -m pip install \ + --no-build-isolation \ + --no-cache-dir \ + --no-deps \ + --no-index \ + --no-warn-script-location \ + --prefix="$out" \ + --verbose \ + ./*.whl + '' + + '' + runHook postInstall + ''; + # NOTE(review): the comment and disabled command below reference cv2 and appear to be + # copied from the OpenCV package; tiny-cuda-nn installs no cv2 — consider removing them + # rm -r $out/${pythonPackages.python.sitePackages}/cv2 + + passthru = { + inherit cudaPackages; + }; + + meta = with lib; { + description = "Lightning fast C++/CUDA neural network framework"; + homepage = "https://github.com/NVlabs/tiny-cuda-nn"; + license = licenses.bsd3; + maintainers = with maintainers; []; + platforms = platforms.linux; + broken = !cudaSupport; + }; + }) diff --git a/pkgs/top-level/all-packages.nix b/pkgs/top-level/all-packages.nix index 94fdba09127d562..0978f535f1aafc1 100644 --- a/pkgs/top-level/all-packages.nix +++ b/pkgs/top-level/all-packages.nix @@ -3765,6 +3765,8 @@ with pkgs; tensorflow-lite = callPackage ../development/libraries/science/math/tensorflow-lite { }; + 
tiny-cuda-nn = callPackage ../development/libraries/science/math/tiny-cuda-nn { }; + tezos-rust-libs = callPackage ../development/libraries/tezos-rust-libs { }; behave = with python3Packages; toPythonApplication behave; diff --git a/pkgs/top-level/python-packages.nix b/pkgs/top-level/python-packages.nix index 276cf90b4595a8d..d6c36211b24d512 100644 --- a/pkgs/top-level/python-packages.nix +++ b/pkgs/top-level/python-packages.nix @@ -11555,6 +11555,11 @@ self: super: with self; { timm = callPackage ../development/python-modules/timm { }; + tiny-cuda-nn = toPythonModule (pkgs.tiny-cuda-nn.override { + pythonSupport = true; + pythonPackages = self; + }); + tinycss2 = callPackage ../development/python-modules/tinycss2 { }; tinycss = callPackage ../development/python-modules/tinycss { };