{
  lib,
  stdenv,
  buildPythonPackage,
  autoAddDriverRunpath,
  fetchurl,
  python,
  pythonAtLeast,
  pythonOlder,
  addOpenGLRunpath,
  cudaPackages,
  future,
  numpy,
  autoPatchelfHook,
  pyyaml,
  requests,
  setuptools,
  typing-extensions,
  sympy,
  jinja2,
  networkx,
  filelock,
  openai-triton,
}:

let
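  # Python version with the dot removed, e.g. "3.11" -> "311"; used to select
  # the matching wheel from binary-hashes.nix.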
  pyVerNoDot = builtins.replaceStrings [ "." ] [ "" ] python.pythonVersion;
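  # binary-hashes.nix is expected to be a function from the torch version to an
  # attribute set keyed on "${stdenv.system}-${pyVerNoDot}" (e.g.
  # "x86_64-linux-311"), where each value holds the fetchurl arguments (url and
  # hash) of the corresponding wheel.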
  srcs = import ./binary-hashes.nix version;
  unsupported = throw "Unsupported system";
  version = "2.3.0";
in
buildPythonPackage {
  inherit version;

  pname = "torch";
  # Don't forget to update torch to the same version.

  # Install the upstream-built wheel as-is; nothing is compiled from source.
  format = "wheel";

  # Upstream only publishes wheels for CPython 3.8 through 3.12.
  disabled = (pythonOlder "3.8") || (pythonAtLeast "3.13");

  # Select the wheel for the current platform and Python version; evaluation
  # fails with "Unsupported system" if no wheel hash is recorded for it.
  src = fetchurl srcs."${stdenv.system}-${pyVerNoDot}" or unsupported;

  nativeBuildInputs = lib.optionals stdenv.isLinux [
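    # autoPatchelfHook patches the prebuilt ELF files (interpreter and
    # runpaths), while addOpenGLRunpath/autoAddDriverRunpath make sure
    # /run/opengl-driver/lib is searched at runtime for the NVIDIA driver
    # libraries.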
    addOpenGLRunpath
    autoPatchelfHook
    autoAddDriverRunpath
  ];

  buildInputs = lib.optionals stdenv.isLinux (
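    # The CUDA userspace libraries the wheel links against; autoPatchelfHook
    # resolves the bundled *.so files against these build inputs.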
    with cudaPackages;
    [
      # $out/${sitePackages}/nvfuser/_C*.so wants libnvToolsExt.so.1 but torch/lib only ships
      # libnvToolsExt-$hash.so.1
      cuda_nvtx

      cuda_cudart
      cuda_cupti
      cuda_nvrtc
      cudnn
      libcublas
      libcufft
      libcurand
      libcusolver
      libcusparse
      nccl
    ]
  );

  autoPatchelfIgnoreMissingDeps = lib.optionals stdenv.isLinux [
    # This is the hardware-dependent userspace driver that comes from
    # nvidia_x11 package. It must be deployed at runtime in
    # /run/opengl-driver/lib or pointed at by LD_LIBRARY_PATH variable, rather
    # than pinned in runpath
    "libcuda.so.1"
  ];

  propagatedBuildInputs = [
    future
    numpy
    pyyaml
    requests
    setuptools
    typing-extensions
    sympy
    jinja2
    networkx
    filelock
  ] ++ lib.optionals (stdenv.isLinux && stdenv.isx86_64) [ openai-triton ];

  postInstall = ''
    # Remove the console scripts the wheel installs into $out/bin
    # (the ONNX conversion helpers and torchrun).
    rm -rf $out/bin
  '';

  postFixup = lib.optionalString stdenv.isLinux ''
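    # The wheel bundles its own copies of shared libraries under torch/lib;
    # let autoPatchelfHook resolve the other bundled *.so files against them.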
    addAutoPatchelfSearchPath "$out/${python.sitePackages}/torch/lib"
  '';

  # Stripping the prebuilt libraries breaks loading them:
  # `ImportError: libtorch_cuda_cpp.so: ELF load command address/offset not properly aligned`.
  dontStrip = true;

  pythonImportsCheck = [ "torch" ];

  meta = with lib; {
    description = "PyTorch: Tensors and Dynamic neural networks in Python with strong GPU acceleration";
    homepage = "https://pytorch.org/";
    changelog = "https://github.com/pytorch/pytorch/releases/tag/v${version}";
    # Includes CUDA and Intel MKL, but redistribution of the binary is not
    # restricted:
    # https://docs.nvidia.com/cuda/eula/index.html
    # https://www.intel.com/content/www/us/en/developer/articles/license/onemkl-license-faq.html
    # torch itself is licensed under BSD-3-Clause.
    # torch-bin used to vendor CUDA; it still links against CUDA and MKL.
    license = with licenses; [
      bsd3
      issl
      unfreeRedistributable
    ];
    sourceProvenance = with sourceTypes; [ binaryNativeCode ];
    platforms = [
      "aarch64-darwin"
      "aarch64-linux"
      "x86_64-linux"
    ];
    hydraPlatforms = [ ]; # output size 3.2G on 1.11.0
    maintainers = with maintainers; [ junjihashimoto ];
  };
}