Diffstat (limited to 'pkgs/development/libraries/science/math/tensorrt/generic.nix')
-rw-r--r--  pkgs/development/libraries/science/math/tensorrt/generic.nix  95
1 file changed, 0 insertions(+), 95 deletions(-)
diff --git a/pkgs/development/libraries/science/math/tensorrt/generic.nix b/pkgs/development/libraries/science/math/tensorrt/generic.nix
deleted file mode 100644
index 2bcdd8e588cf0..0000000000000
--- a/pkgs/development/libraries/science/math/tensorrt/generic.nix
+++ /dev/null
@@ -1,95 +0,0 @@
-{ lib
-, backendStdenv
-, requireFile
-, autoPatchelfHook
-, autoAddOpenGLRunpathHook
-, cudaVersion
-, cudatoolkit
-, cudnn
-}:
-
-{ enable ? true
-, fullVersion
-, fileVersionCudnn ? null
-, tarball
-, sha256
-, supportedCudaVersions ? [ ]
-}:
-
-assert !enable || fileVersionCudnn == null || lib.assertMsg (lib.strings.versionAtLeast cudnn.version fileVersionCudnn)
-  "This version of TensorRT requires at least cuDNN ${fileVersionCudnn} (current version is ${cudnn.version})";
-
-backendStdenv.mkDerivation rec {
-  pname = "cudatoolkit-${cudatoolkit.majorVersion}-tensorrt";
-  version = fullVersion;
-  src = if !enable then null else
-  requireFile rec {
-    name = tarball;
-    inherit sha256;
-    message = ''
-      To use the TensorRT derivation, you must join the NVIDIA Developer Program and
-      download the ${version} Linux x86_64 TAR package for CUDA ${cudaVersion} from
-      ${meta.homepage}.
-
-      Once you have downloaded the file, add it to the store with the following
-      command, and try building this derivation again.
-
-      $ nix-store --add-fixed sha256 ${name}
-    '';
-  };
-
-  outputs = [ "out" "dev" ];
-
-  nativeBuildInputs = lib.optionals enable [
-    autoPatchelfHook
-    autoAddOpenGLRunpathHook
-  ];
-
-  # Used by autoPatchelfHook
-  buildInputs = lib.optionals enable [
-    backendStdenv.cc.cc.lib # libstdc++
-    cudatoolkit
-    cudnn
-  ];
-
-  sourceRoot = "TensorRT-${version}";
-
-  installPhase = ''
-    install --directory "$dev" "$out"
-    mv include "$dev"
-    mv targets/x86_64-linux-gnu/lib "$out"
-    install -D --target-directory="$out/bin" targets/x86_64-linux-gnu/bin/trtexec
-  '';
-
-  # Tell autoPatchelf about runtime dependencies.
-  # (postFixup phase is run before autoPatchelfHook.)
-  postFixup =
-    let
-      mostOfVersion = builtins.concatStringsSep "."
-        (lib.take 3 (lib.versions.splitVersion version));
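-      # e.g. a hypothetical fullVersion of "8.4.0.6" yields "8.4.0" here, matching
-      # the soname suffix of the installed libnvinfer libraries.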
-    in
-    ''
-      echo 'Patching RPATH of libnvinfer libs'
-      patchelf --debug --add-needed libnvinfer.so \
-        "$out/lib/libnvinfer.so.${mostOfVersion}" \
-        "$out/lib/libnvinfer_plugin.so.${mostOfVersion}" \
-        "$out/lib/libnvinfer_builder_resource.so.${mostOfVersion}"
-    '';
-
-  passthru.stdenv = backendStdenv;
-  passthru.enable = enable;
-
-  meta = with lib; {
-    # Check that the cudatoolkit version satisfies our min/max constraints (both
-    # inclusive). We mark the package as broken if it fails to satisfy the
-    # official version constraints (as recorded in default.nix). In some cases
-    # you _may_ be able to relax these version constraints, but know that you're
-    # venturing into unknown and unsupported territory when doing so.
-    broken = !enable || !(elem cudaVersion supportedCudaVersions);
-    description = "TensorRT: a high-performance deep learning interface";
-    homepage = "https://developer.nvidia.com/tensorrt";
-    license = licenses.unfree;
-    platforms = [ "x86_64-linux" ];
-    maintainers = with maintainers; [ aidalgol ];
-  };
-}
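
For context, a generic builder like the one deleted above is meant to be instantiated once per TensorRT release from a sibling default.nix. The snippet below is a minimal sketch of such an invocation, assuming the function is wired up through callPackage inside the CUDA package scope; the attribute name, version string, tarball name, and supported CUDA versions are hypothetical placeholders, not values taken from nixpkgs.

# Hypothetical default.nix sketch; every concrete value below is a placeholder.
{ callPackage, lib }:

{
  tensorrt_8_4 = callPackage ./generic.nix { } {
    fullVersion = "8.4.0.6";                  # hypothetical TensorRT version
    fileVersionCudnn = "8.3";                 # hypothetical minimum cuDNN version
    tarball = "TensorRT-8.4.0.6.Linux.x86_64-gnu.cuda-11.6.cudnn8.3.tar.gz"; # hypothetical file name
    sha256 = lib.fakeSha256;                  # placeholder hash
    supportedCudaVersions = [ "11.5" "11.6" ]; # hypothetical
  };
}

The first argument set of generic.nix (backendStdenv, cudatoolkit, cudnn, and so on) is filled in automatically by callPackage from the surrounding package scope, while the second argument set carries the per-release data shown here.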