diff options
Diffstat (limited to 'nixpkgs/pkgs/development/cuda-modules/tensorrt')
3 files changed, 259 insertions, 0 deletions
diff --git a/nixpkgs/pkgs/development/cuda-modules/tensorrt/fixup.nix b/nixpkgs/pkgs/development/cuda-modules/tensorrt/fixup.nix
new file mode 100644
index 000000000000..d713189328ed
--- /dev/null
+++ b/nixpkgs/pkgs/development/cuda-modules/tensorrt/fixup.nix
@@ -0,0 +1,113 @@
+{
+  cudaVersion,
+  final,
+  hostPlatform,
+  lib,
+  mkVersionedPackageName,
+  package,
+  patchelf,
+  requireFile,
+  ...
+}:
+let
+  inherit (lib)
+    maintainers
+    meta
+    strings
+    versions
+    ;
+in
+finalAttrs: prevAttrs: {
+  # Useful for inspecting why something went wrong.
+  brokenConditions =
+    let
+      cudaTooOld = strings.versionOlder cudaVersion package.minCudaVersion;
+      cudaTooNew =
+        (package.maxCudaVersion != null) && strings.versionOlder package.maxCudaVersion cudaVersion;
+      cudnnVersionIsSpecified = package.cudnnVersion != null;
+      cudnnVersionSpecified = versions.majorMinor package.cudnnVersion;
+      cudnnVersionProvided = versions.majorMinor finalAttrs.passthru.cudnn.version;
+      cudnnTooOld =
+        cudnnVersionIsSpecified && (strings.versionOlder cudnnVersionProvided cudnnVersionSpecified);
+      cudnnTooNew =
+        cudnnVersionIsSpecified && (strings.versionOlder cudnnVersionSpecified cudnnVersionProvided);
+    in
+    prevAttrs.brokenConditions
+    // {
+      "CUDA version is too old" = cudaTooOld;
+      "CUDA version is too new" = cudaTooNew;
+      "CUDNN version is too old" = cudnnTooOld;
+      "CUDNN version is too new" = cudnnTooNew;
+    };
+
+  src = requireFile {
+    name = package.filename;
+    inherit (package) hash;
+    message = ''
+      To use the TensorRT derivation, you must join the NVIDIA Developer Program and
+      download the ${package.version} TAR package for CUDA ${cudaVersion} from
+      ${finalAttrs.meta.homepage}.
+
+      Once you have downloaded the file, add it to the store with the following
+      command, and try building this derivation again.
+
+      $ nix-store --add-fixed sha256 ${package.filename}
+    '';
+  };
+
+  # We need to look inside the extracted output to get the files we need.
+  sourceRoot = "TensorRT-${finalAttrs.version}";
+
+  buildInputs = prevAttrs.buildInputs ++ [finalAttrs.passthru.cudnn.lib];
+
+  preInstall =
+    let
+      targetArch =
+        if hostPlatform.isx86_64 then
+          "x86_64-linux-gnu"
+        else if hostPlatform.isAarch64 then
+          "aarch64-linux-gnu"
+        else
+          throw "Unsupported architecture";
+    in
+    (prevAttrs.preInstall or "")
+    + ''
+      # Replace symlinks to bin and lib with the actual directories from targets.
+      for dir in bin lib; do
+        rm "$dir"
+        mv "targets/${targetArch}/$dir" "$dir"
+      done
+    '';
+
+  # Tell autoPatchelf about runtime dependencies.
+  postFixup =
+    let
+      versionTriple = "${versions.majorMinor finalAttrs.version}.${versions.patch finalAttrs.version}";
+    in
+    (prevAttrs.postFixup or "")
+    + ''
+      ${meta.getExe' patchelf "patchelf"} --add-needed libnvinfer.so \
+        "$lib/lib/libnvinfer.so.${versionTriple}" \
+        "$lib/lib/libnvinfer_plugin.so.${versionTriple}" \
+        "$lib/lib/libnvinfer_builder_resource.so.${versionTriple}"
+    '';
+
+  passthru = {
+    useCudatoolkitRunfile = strings.versionOlder cudaVersion "11.3.999";
+    # The CUDNN used with TensorRT.
+    # If null, the default cudnn derivation will be used.
+    # If a version is specified, the cudnn derivation with that version will be used,
+    # unless it is not available, in which case the default cudnn derivation will be used.
+    cudnn =
+      let
+        desiredName = mkVersionedPackageName "cudnn" package.cudnnVersion;
+        desiredIsAvailable = final ? ${desiredName};
+      in
+      if package.cudnnVersion == null || !desiredIsAvailable then final.cudnn else final.${desiredName};
+  };
+
+  meta = prevAttrs.meta // {
+    homepage = "https://developer.nvidia.com/tensorrt";
+    maintainers = prevAttrs.meta.maintainers ++ [maintainers.aidalgol];
+  };
+}
diff --git a/nixpkgs/pkgs/development/cuda-modules/tensorrt/releases.nix b/nixpkgs/pkgs/development/cuda-modules/tensorrt/releases.nix
new file mode 100644
index 000000000000..d6a1f0487dd4
--- /dev/null
+++ b/nixpkgs/pkgs/development/cuda-modules/tensorrt/releases.nix
@@ -0,0 +1,130 @@
+# NOTE: Check https://developer.nvidia.com/nvidia-tensorrt-8x-download.
+# Version policy is to keep the latest minor release for each major release.
+{
+  tensorrt.releases = {
+    # jetson
+    linux-aarch64 = [];
+    # powerpc
+    linux-ppc64le = [];
+    # server-grade arm
+    linux-sbsa = [
+      {
+        version = "8.2.5.1";
+        minCudaVersion = "11.4";
+        maxCudaVersion = "11.4";
+        cudnnVersion = "8.2";
+        filename = "TensorRT-8.2.5.1.Ubuntu-20.04.aarch64-gnu.cuda-11.4.cudnn8.2.tar.gz";
+        hash = "sha256-oWfQ3lq2aoMPv65THeotnMilTzP+QWqKeToLU8eO+qo=";
+      }
+      {
+        version = "8.4.3.1";
+        minCudaVersion = "11.6";
+        maxCudaVersion = "11.6";
+        cudnnVersion = "8.4";
+        filename = "TensorRT-8.4.3.1.Ubuntu-20.04.aarch64-gnu.cuda-11.6.cudnn8.4.tar.gz";
+        hash = "sha256-9tLlrB8cKYFvN2xF0Pol5CZs06iuuI5mq+6jpzD8wWI=";
+      }
+      {
+        version = "8.5.3.1";
+        minCudaVersion = "11.8";
+        maxCudaVersion = "11.8";
+        cudnnVersion = "8.6";
+        filename = "TensorRT-8.5.3.1.Ubuntu-20.04.aarch64-gnu.cuda-11.8.cudnn8.6.tar.gz";
+        hash = "sha256-GW//mX0brvN/waHo9Wd07xerOEz3X/H/HAW2ZehYtTA=";
+      }
+      {
+        version = "8.6.1.6";
+        minCudaVersion = "12.0";
+        maxCudaVersion = "12.0";
+        cudnnVersion = null;
+        filename = "TensorRT-8.6.1.6.Ubuntu-20.04.aarch64-gnu.cuda-12.0.tar.gz";
+        hash = "sha256-Lc4+v/yBr17VlecCSFMLUDlXMTYV68MGExwnUjGme5E=";
+      }
+    ];
+    # x86_64
+    linux-x86_64 = [
+      {
+        version = "8.0.3.4";
+        minCudaVersion = "10.2";
+        maxCudaVersion = "10.2";
+        cudnnVersion = "8.2";
+        filename = "TensorRT-8.0.3.4.Linux.x86_64-gnu.cuda-10.2.cudnn8.2.tar.gz";
+        hash = "sha256-LxcXgwe1OCRfwDsEsNLIkeNsOcx3KuF5Sj+g2dY6WD0=";
+      }
+      {
+        version = "8.0.3.4";
+        minCudaVersion = "11.0";
+        maxCudaVersion = "11.3";
+        cudnnVersion = "8.2";
+        filename = "TensorRT-8.0.3.4.Linux.x86_64-gnu.cuda-11.3.cudnn8.2.tar.gz";
+        hash = "sha256-MXdDUCT/SqWm26jB7QarEcwOG/O7cS36Y6Q0IvQTE/M=";
+      }
+      {
+        version = "8.2.5.1";
+        minCudaVersion = "10.2";
+        maxCudaVersion = "10.2";
+        cudnnVersion = "8.2";
+        filename = "TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-10.2.cudnn8.2.tar.gz";
+        hash = "sha256-XV2Bf2LH8OM2GEMjV80MDweb1hSVF/wFUcaW3KP2m8Q=";
+      }
+      {
+        # The docs claim this supports through 11.5 despite the file name indicating 11.4.
+        version = "8.2.5.1";
+        minCudaVersion = "11.0";
+        maxCudaVersion = "11.5";
+        cudnnVersion = "8.2";
+        filename = "TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz";
+        hash = "sha256-LcNpYvDiT7AavqzK1MRlijo2qDN7jznigeS77US713E=";
+      }
+      {
+        version = "8.4.3.1";
+        minCudaVersion = "10.2";
+        maxCudaVersion = "10.2";
+        cudnnVersion = "8.4";
+        filename = "TensorRT-8.4.3.1.Linux.x86_64-gnu.cuda-10.2.cudnn8.4.tar.gz";
+        hash = "sha256-2c3Zzt93FBWWQtrSIvpbzzS6BT9s0NzALzdwXGLOZEU=";
+      }
+      {
+        # The docs claim this supports through 11.7 despite the file name indicating 11.6.
+        version = "8.4.3.1";
+        minCudaVersion = "11.0";
+        maxCudaVersion = "11.7";
+        cudnnVersion = "8.4";
+        filename = "TensorRT-8.4.3.1.Linux.x86_64-gnu.cuda-11.6.cudnn8.4.tar.gz";
+        hash = "sha256-jXwghcFjncxzh1BIwjWYqFJs4wiRNoduMdkCWOSeT2E=";
+      }
+      {
+        version = "8.5.3.1";
+        minCudaVersion = "10.2";
+        maxCudaVersion = "10.2";
+        cudnnVersion = "8.6";
+        filename = "TensorRT-8.5.3.1.Linux.x86_64-gnu.cuda-10.2.cudnn8.6.tar.gz";
+        hash = "sha256-WCt6yfOmFbrjqdYCj6AE2+s2uFpISwk6urP+2I0BnGQ=";
+      }
+      {
+        version = "8.5.3.1";
+        minCudaVersion = "11.0";
+        maxCudaVersion = "11.8";
+        cudnnVersion = "8.6";
+        filename = "TensorRT-8.5.3.1.Linux.x86_64-gnu.cuda-11.8.cudnn8.6.tar.gz";
+        hash = "sha256-BNeuOYvPTUAfGxI0DVsNrX6Z/FAB28+SE0ptuGu7YDY=";
+      }
+      {
+        version = "8.6.1.6";
+        minCudaVersion = "11.0";
+        maxCudaVersion = "11.8";
+        cudnnVersion = "8.9";
+        filename = "TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz";
+        hash = "sha256-Fb/mBT1F/uxF7McSOpEGB2sLQ/oENfJC2J3KB3gzd1k=";
+      }
+      {
+        version = "8.6.1.6";
+        minCudaVersion = "12.0";
+        maxCudaVersion = "12.1";
+        cudnnVersion = "8.9";
+        filename = "TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-12.0.tar.gz";
+        hash = "sha256-D4FXpfxTKZQ7M4uJNZE3M1CvqQyoEjnNrddYDNHrolQ=";
+      }
+    ];
+  };
+}
diff --git a/nixpkgs/pkgs/development/cuda-modules/tensorrt/shims.nix b/nixpkgs/pkgs/development/cuda-modules/tensorrt/shims.nix
new file mode 100644
index 000000000000..8be3e7988bb3
--- /dev/null
+++ b/nixpkgs/pkgs/development/cuda-modules/tensorrt/shims.nix
@@ -0,0 +1,16 @@
+# Shims to mimic the shape of ../modules/generic/manifests/{feature,redistrib}/release.nix
+{package, redistArch}:
+{
+  featureRelease.${redistArch}.outputs = {
+    bin = true;
+    lib = true;
+    static = true;
+    dev = true;
+    sample = true;
+    python = true;
+  };
+  redistribRelease = {
+    name = "TensorRT: a high-performance deep learning interface";
+    inherit (package) version;
+  };
+}