Diffstat (limited to 'nixpkgs/pkgs/development/python-modules/torch/bin.nix')
-rw-r--r--  nixpkgs/pkgs/development/python-modules/torch/bin.nix  27
1 file changed, 13 insertions, 14 deletions
diff --git a/nixpkgs/pkgs/development/python-modules/torch/bin.nix b/nixpkgs/pkgs/development/python-modules/torch/bin.nix
index 9b12470cdde8..bea1769439a0 100644
--- a/nixpkgs/pkgs/development/python-modules/torch/bin.nix
+++ b/nixpkgs/pkgs/development/python-modules/torch/bin.nix
@@ -24,7 +24,7 @@ let
   pyVerNoDot = builtins.replaceStrings [ "." ] [ "" ] python.pythonVersion;
   srcs = import ./binary-hashes.nix version;
   unsupported = throw "Unsupported system";
-  version = "2.1.2";
+  version = "2.2.0";
 in buildPythonPackage {
   inherit version;
 
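The version bump in this hunk only takes effect together with ./binary-hashes.nix, which is imported above as a function of `version` and must therefore already carry entries for 2.2.0. A minimal sketch of the shape that file plausibly has; the wheel name, URL and hash below are placeholders, not the real 2.2.0 values:

  version: builtins.getAttr "torch-${version}" {
    "torch-2.2.0" = {
      # one attribute per (platform, Python) combination; values are placeholders
      x86_64-linux-311 = {
        name = "torch-2.2.0-cp311-cp311-linux_x86_64.whl";
        url = "https://download.pytorch.org/whl/cu121/torch-2.2.0-cp311-cp311-linux_x86_64.whl";
        hash = "sha256-0000000000000000000000000000000000000000000=";
      };
      # further platform/Python combinations elided
    };
  }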
@@ -47,6 +47,17 @@ in buildPythonPackage {
     # $out/${sitePackages}/nvfuser/_C*.so wants libnvToolsExt.so.1 but torch/lib only ships
     # libnvToolsExt-$hash.so.1
     cuda_nvtx
+
+    cuda_cudart
+    cuda_cupti
+    cuda_nvrtc
+    cudnn
+    libcublas
+    libcufft
+    libcurand
+    libcusolver
+    libcusparse
+    nccl
   ]);
 
   autoPatchelfIgnoreMissingDeps = lib.optionals stdenv.isLinux [
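The hunk above shows only a fragment of the attribute it edits. The libraries it adds all come from cudaPackages, and the unchanged lines around the hunk presumably wire them in roughly as sketched below, so that autoPatchelfHook can satisfy the wheel's DT_NEEDED entries from store paths instead of from copies bundled inside the wheel. The placement of autoPatchelfHook in nativeBuildInputs is an assumption here, not shown in the diff:

  nativeBuildInputs = lib.optionals stdenv.isLinux [
    autoPatchelfHook
  ];

  buildInputs = lib.optionals stdenv.isLinux (with cudaPackages; [
    # libnvToolsExt.so.1 for nvfuser, see the comment in the hunk above
    cuda_nvtx

    # runtime, profiling, JIT-compiler and math libraries the 2.2.0 wheel links against
    cuda_cudart
    cuda_cupti
    cuda_nvrtc
    cudnn
    libcublas
    libcufft
    libcurand
    libcusolver
    libcusparse
    nccl
  ]);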
@@ -79,18 +90,6 @@ in buildPythonPackage {
 
   postFixup = lib.optionalString stdenv.isLinux ''
     addAutoPatchelfSearchPath "$out/${python.sitePackages}/torch/lib"
-
-    patchelf $out/${python.sitePackages}/torch/lib/libcudnn.so.8 --add-needed libcudnn_cnn_infer.so.8
-
-    pushd $out/${python.sitePackages}/torch/lib || exit 1
-      for LIBNVRTC in ./libnvrtc*
-      do
-        case "$LIBNVRTC" in
-          ./libnvrtc-builtins*) true;;
-          ./libnvrtc*) patchelf "$LIBNVRTC" --add-needed libnvrtc-builtins* ;;
-        esac
-      done
-    popd || exit 1
   '';
 
   # The wheel binary is not stripped, to avoid `ImportError: libtorch_cuda_cpp.so: ELF load command address/offset not properly aligned.`.
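With cudnn and cuda_nvrtc now supplied through cudaPackages (previous hunk), the wheel's vendored libcudnn.so.8 no longer has to be given an explicit dependency on libcudnn_cnn_infer.so.8, and the bundled libnvrtc*.so no longer needs libnvrtc-builtins patched in; those dependencies are resolved from the store instead. Per the hunk above, the Linux postFixup therefore reduces to the single search-path call, which keeps the libraries that are still bundled in the wheel able to find one another:

  postFixup = lib.optionalString stdenv.isLinux ''
    addAutoPatchelfSearchPath "$out/${python.sitePackages}/torch/lib"
  '';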
@@ -106,7 +105,7 @@ in buildPythonPackage {
     # https://docs.nvidia.com/cuda/eula/index.html
     # https://www.intel.com/content/www/us/en/developer/articles/license/onemkl-license-faq.html
     # torch's license is BSD3.
-    # torch-bin includes CUDA and MKL binaries, therefore unfreeRedistributable is set.
+    # torch-bin used to vendor CUDA. It still links against CUDA and MKL.
     license = with licenses; [ bsd3 issl unfreeRedistributable ];
     sourceProvenance = with sourceTypes; [ binaryNativeCode ];
     platforms = [ "aarch64-darwin" "aarch64-linux" "x86_64-darwin" "x86_64-linux" ];