Diffstat (limited to 'pkgs/by-name/ol')
-rw-r--r--   pkgs/by-name/ol/ollama/disable-git.patch    22
-rw-r--r--   pkgs/by-name/ol/ollama/package.nix         242
-rw-r--r--   pkgs/by-name/ol/ollama/skip-rocm-cp.patch    14
-rw-r--r--   pkgs/by-name/ol/ols/package.nix              59
-rw-r--r--   pkgs/by-name/ol/olvid/package.nix            10
5 files changed, 342 insertions, 5 deletions
diff --git a/pkgs/by-name/ol/ollama/disable-git.patch b/pkgs/by-name/ol/ollama/disable-git.patch
new file mode 100644
index 0000000000000..5248905302dc2
--- /dev/null
+++ b/pkgs/by-name/ol/ollama/disable-git.patch
@@ -0,0 +1,22 @@
+diff --git a/llm/generate/gen_common.sh b/llm/generate/gen_common.sh
+index 3825c155..d22eccd2 100644
+--- a/llm/generate/gen_common.sh
++++ b/llm/generate/gen_common.sh
+@@ -69,6 +69,8 @@ git_module_setup() {
+ }
+ 
+ apply_patches() {
++    return
++
+     # apply temporary patches until fix is upstream
+     for patch in ../patches/*.patch; do
+         git -c 'user.name=nobody' -c 'user.email=<>' -C ${LLAMACPP_DIR} am ${patch}
+@@ -133,6 +135,8 @@ install() {
+ 
+ # Keep the local tree clean after we're done with the build
+ cleanup() {
++    return
++
+     (cd ${LLAMACPP_DIR}/ && git checkout CMakeLists.txt)
+ 
+     if [ -n "$(ls -A ../patches/*.diff)" ]; then
diff --git a/pkgs/by-name/ol/ollama/package.nix b/pkgs/by-name/ol/ollama/package.nix
new file mode 100644
index 0000000000000..ababb3f68a70f
--- /dev/null
+++ b/pkgs/by-name/ol/ollama/package.nix
@@ -0,0 +1,242 @@
+{
+  lib,
+  buildGoModule,
+  fetchFromGitHub,
+  buildEnv,
+  linkFarm,
+  overrideCC,
+  makeWrapper,
+  stdenv,
+  addDriverRunpath,
+  nix-update-script,
+
+  cmake,
+  gcc12,
+  clblast,
+  libdrm,
+  rocmPackages,
+  cudaPackages,
+  darwin,
+  autoAddDriverRunpath,
+
+  nixosTests,
+  testers,
+  ollama,
+  ollama-rocm,
+  ollama-cuda,
+
+  config,
+  # one of `[ null false "rocm" "cuda" ]`
+  acceleration ? null,
+}:
+
+assert builtins.elem acceleration [
+  null
+  false
+  "rocm"
+  "cuda"
+];
+
+let
+  pname = "ollama";
+  # don't forget to invalidate all hashes each update
+  version = "0.3.11";
+
+  src = fetchFromGitHub {
+    owner = "ollama";
+    repo = "ollama";
+    rev = "v${version}";
+    hash = "sha256-YYrNrlXL6ytLfnrvSHybi0va0lvgVNuIRP+IFE5XZX8=";
+    fetchSubmodules = true;
+  };
+
+  vendorHash = "sha256-hSxcREAujhvzHVNwnRTfhi0MKI3s8HNavER2VLz6SYk=";
+
+  validateFallback = lib.warnIf (config.rocmSupport && config.cudaSupport) (lib.concatStrings [
+    "both `nixpkgs.config.rocmSupport` and `nixpkgs.config.cudaSupport` are enabled, "
+    "but they are mutually exclusive; falling back to cpu"
+  ]) (!(config.rocmSupport && config.cudaSupport));
+  shouldEnable =
+    mode: fallback: (acceleration == mode) || (fallback && acceleration == null && validateFallback);
+
+  rocmRequested = shouldEnable "rocm" config.rocmSupport;
+  cudaRequested = shouldEnable "cuda" config.cudaSupport;
+
+  enableRocm = rocmRequested && stdenv.hostPlatform.isLinux;
+  enableCuda = cudaRequested && stdenv.hostPlatform.isLinux;
+
+  rocmLibs = [
+    rocmPackages.clr
+    rocmPackages.hipblas
+    rocmPackages.rocblas
+    rocmPackages.rocsolver
+    rocmPackages.rocsparse
+    rocmPackages.rocm-device-libs
+    rocmPackages.rocm-smi
+  ];
+  rocmClang = linkFarm "rocm-clang" { llvm = rocmPackages.llvm.clang; };
+  rocmPath = buildEnv {
+    name = "rocm-path";
+    paths = rocmLibs ++ [ rocmClang ];
+  };
+
+  cudaLibs = [
+    cudaPackages.cuda_cudart
+    cudaPackages.libcublas
+    cudaPackages.cuda_cccl
+  ];
+  cudaToolkit = buildEnv {
+    name = "cuda-merged";
+    paths = map lib.getLib cudaLibs ++ [
+      (lib.getOutput "static" cudaPackages.cuda_cudart)
+      (lib.getBin (cudaPackages.cuda_nvcc.__spliced.buildHost or cudaPackages.cuda_nvcc))
+    ];
+  };
+
+  metalFrameworks = with darwin.apple_sdk_11_0.frameworks; [
+    Accelerate
+    Metal
+    MetalKit
+    MetalPerformanceShaders
+  ];
+
+  wrapperOptions =
+    [
+      # ollama embeds llama-cpp binaries which actually run the ai models
+      # these llama-cpp binaries are unaffected by the ollama binary's DT_RUNPATH
+      # LD_LIBRARY_PATH is temporarily required to use the gpu
+      # until these llama-cpp binaries can have their runpath patched
+      "--suffix LD_LIBRARY_PATH : '${addDriverRunpath.driverLink}/lib'"
+    ]
+    ++ lib.optionals enableRocm [
+      "--suffix LD_LIBRARY_PATH : '${rocmPath}/lib'"
+      "--set-default HIP_PATH '${rocmPath}'"
+    ]
+    ++ lib.optionals enableCuda [
+      "--suffix LD_LIBRARY_PATH : '${lib.makeLibraryPath (map lib.getLib cudaLibs)}'"
+    ];
+  wrapperArgs = builtins.concatStringsSep " " wrapperOptions;
+
+  goBuild =
+    if enableCuda then buildGoModule.override { stdenv = overrideCC stdenv gcc12; } else buildGoModule;
+  inherit (lib) licenses platforms maintainers;
+in
+goBuild {
+  inherit
+    pname
+    version
+    src
+    vendorHash
+    ;
+
+  env =
+    lib.optionalAttrs enableRocm {
+      ROCM_PATH = rocmPath;
+      CLBlast_DIR = "${clblast}/lib/cmake/CLBlast";
+    }
+    // lib.optionalAttrs enableCuda { CUDA_LIB_DIR = "${cudaToolkit}/lib"; };
+
+  nativeBuildInputs =
+    [ cmake ]
+    ++ lib.optionals enableRocm [ rocmPackages.llvm.bintools ]
+    ++ lib.optionals enableCuda [ cudaPackages.cuda_nvcc ]
+    ++ lib.optionals (enableRocm || enableCuda) [
+      makeWrapper
+      autoAddDriverRunpath
+    ]
+    ++ lib.optionals stdenv.hostPlatform.isDarwin metalFrameworks;
+
+  buildInputs =
+    lib.optionals enableRocm (rocmLibs ++ [ libdrm ])
+    ++ lib.optionals enableCuda cudaLibs
+    ++ lib.optionals stdenv.hostPlatform.isDarwin metalFrameworks;
+
+  patches = [
+    # disable uses of `git` in the `go generate` script
+    # ollama's build script assumes the source is a git repo, but nix removes the git directory
+    # this also disables necessary patches contained in `ollama/llm/patches/`
+    # those patches are applied in `postPatch`
+    ./disable-git.patch
+
+    # we provide our own deps at runtime
+    ./skip-rocm-cp.patch
+  ];
+
+  postPatch = ''
+    # replace inaccurate version number with actual release version
+    substituteInPlace version/version.go --replace-fail 0.0.0 '${version}'
+
+    # apply ollama's patches to `llama.cpp` submodule
+    for diff in llm/patches/*; do
+      patch -p1 -d llm/llama.cpp < $diff
+    done
+  '';
+
+  overrideModAttrs = (
+    finalAttrs: prevAttrs: {
+      # don't run llama.cpp build in the module fetch phase
+      preBuild = "";
+    }
+  );
+
+  preBuild = ''
+    # disable uses of `git`, since nix removes the git directory
+    export OLLAMA_SKIP_PATCHING=true
+    # build llama.cpp libraries for ollama
+    go generate ./...
+  '';
+
+  postFixup =
+    ''
+      # the app doesn't appear functional at the moment, so hide it
+      mv "$out/bin/app" "$out/bin/.ollama-app"
+    ''
+    + lib.optionalString (enableRocm || enableCuda) ''
+      # expose runtime libraries necessary to use the gpu
+      wrapProgram "$out/bin/ollama" ${wrapperArgs}
+    '';
+
+  ldflags = [
+    "-s"
+    "-w"
+    "-X=github.com/ollama/ollama/version.Version=${version}"
+    "-X=github.com/ollama/ollama/server.mode=release"
+  ];
+
+  passthru = {
+    tests =
+      {
+        inherit ollama;
+        version = testers.testVersion {
+          inherit version;
+          package = ollama;
+        };
+      }
+      // lib.optionalAttrs stdenv.hostPlatform.isLinux {
+        inherit ollama-rocm ollama-cuda;
+        service = nixosTests.ollama;
+        service-cuda = nixosTests.ollama-cuda;
+        service-rocm = nixosTests.ollama-rocm;
+      };
+
+    updateScript = nix-update-script { };
+  };
+
+  meta = {
+    description =
+      "Get up and running with large language models locally"
+      + lib.optionalString rocmRequested ", using ROCm for AMD GPU acceleration"
+      + lib.optionalString cudaRequested ", using CUDA for NVIDIA GPU acceleration";
+    homepage = "https://github.com/ollama/ollama";
+    changelog = "https://github.com/ollama/ollama/releases/tag/v${version}";
+    license = licenses.mit;
+    platforms = if (rocmRequested || cudaRequested) then platforms.linux else platforms.unix;
+    mainProgram = "ollama";
+    maintainers = with maintainers; [
+      abysssol
+      dit7ya
+      elohmeier
+      roydubnium
+    ];
+  };
+}
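
Note on usage: the `acceleration` argument above is the switch between the cpu, rocm and cuda
builds (the `ollama-rocm` and `ollama-cuda` variants are wired up through passthru.tests).
A minimal sketch of forcing a backend from an overlay, assuming only this package.nix; the
overlay itself is illustrative and not part of this change:

    final: prev: {
      # select the CUDA build explicitly instead of relying on `config.cudaSupport`;
      # "rocm", false (cpu only) and null (follow the nixpkgs config) are the other accepted values
      ollama = prev.ollama.override { acceleration = "cuda"; };
    }
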
diff --git a/pkgs/by-name/ol/ollama/skip-rocm-cp.patch b/pkgs/by-name/ol/ollama/skip-rocm-cp.patch
new file mode 100644
index 0000000000000..672b3f24b4975
--- /dev/null
+++ b/pkgs/by-name/ol/ollama/skip-rocm-cp.patch
@@ -0,0 +1,14 @@
+diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh
+index 48d08fd0..e50f7b36 100755
+--- a/llm/generate/gen_linux.sh
++++ b/llm/generate/gen_linux.sh
+@@ -284,9 +284,6 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then
+     mkdir -p "${ROCM_DIST_DIR}"
+     for dep in $(ldd "${BUILD_DIR}/bin/ollama_llama_server" | grep "=>" | cut -f2 -d= | cut -f2 -d' ' | grep -v "${GOARCH}/rocm${ROCM_VARIANT}" | grep -e rocm -e amdgpu -e libtinfo -e libnuma -e libelf ); do
+         cp -a "${dep}"* "${ROCM_DIST_DIR}"
+-        if [ $(readlink -f "${dep}") != "${dep}" ] ; then
+-            cp $(readlink -f "${dep}") "${ROCM_DIST_DIR}"
+-        fi
+     done
+     install
+     dist
diff --git a/pkgs/by-name/ol/ols/package.nix b/pkgs/by-name/ol/ols/package.nix
new file mode 100644
index 0000000000000..ba42560650728
--- /dev/null
+++ b/pkgs/by-name/ol/ols/package.nix
@@ -0,0 +1,59 @@
+{
+  fetchFromGitHub,
+  lib,
+  makeBinaryWrapper,
+  odin,
+  stdenv,
+  unstableGitUpdater,
+}:
+
+stdenv.mkDerivation {
+  pname = "ols";
+  version = "0-unstable-2024-08-05";
+
+  src = fetchFromGitHub {
+    owner = "DanielGavin";
+    repo = "ols";
+    rev = "5f53ba1670b4bd44f6faf589823aa404f3c1a62b";
+    hash = "sha256-4Rw3eNXkmdRMLz9k1UaK6xr0KS4g4AnFpOcrWLos2jg=";
+  };
+
+  postPatch = ''
+    patchShebangs build.sh odinfmt.sh
+  '';
+
+  nativeBuildInputs = [ makeBinaryWrapper ];
+
+  buildInputs = [ odin ];
+
+  buildPhase = ''
+    runHook preBuild
+
+    ./build.sh && ./odinfmt.sh
+
+    runHook postBuild
+  '';
+
+  installPhase = ''
+    runHook preInstall
+
+    install -Dm755 ols odinfmt -t $out/bin/
+    wrapProgram $out/bin/ols --set-default ODIN_ROOT ${odin}/share
+
+    runHook postInstall
+  '';
+
+  passthru.updateScript = unstableGitUpdater { hardcodeZeroVersion = true; };
+
+  meta = {
+    inherit (odin.meta) platforms;
+    description = "Language server for the Odin programming language";
+    homepage = "https://github.com/DanielGavin/ols";
+    license = lib.licenses.mit;
+    maintainers = with lib.maintainers; [
+      astavie
+      znaniye
+    ];
+    mainProgram = "ols";
+  };
+}
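
Note on usage: installPhase wraps `ols` with a default ODIN_ROOT pointing at ${odin}/share, so
the language server finds the Odin core library without manual configuration. A minimal dev
shell pulling it in, assuming the `ols` and `odin` attribute names from this file; the shell.nix
itself is illustrative and not part of this change:

    { pkgs ? import <nixpkgs> { } }:
    pkgs.mkShell {
      # editors discover `ols` on PATH; ODIN_ROOT is already set by the wrapper
      packages = [ pkgs.odin pkgs.ols ];
    }
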
diff --git a/pkgs/by-name/ol/olvid/package.nix b/pkgs/by-name/ol/olvid/package.nix
index bd0eb63a9af71..1d8ba41adb3c2 100644
--- a/pkgs/by-name/ol/olvid/package.nix
+++ b/pkgs/by-name/ol/olvid/package.nix
@@ -13,7 +13,7 @@
 , openjfx21
 , autoPatchelfHook
 , makeBinaryWrapper
-, wrapGAppsHook
+, wrapGAppsHook3
 }:
 
 let
@@ -53,20 +53,20 @@ in
 
 stdenv.mkDerivation (finalAttrs: {
   pname = "olvid";
-  version = "1.5.2";
+  version = "1.6.2";
 
   dontUnpack = true;
   dontWrapGApps = true;
 
   src = fetchurl {
     url = "https://static.olvid.io/linux/${repo}-${finalAttrs.version}.tar.gz";
-    hash = "sha256-WjIOk3dPSXQdAR2fdXseV0NdOjld0PzyqnUx/VbvQio=";
+    hash = "sha256-Cej8ei+Dh6yn7ZRZ1DE9ay/KWWLLALhaQ5gNpBw8vDs=";
   };
 
   nativeBuildInputs = [
     autoPatchelfHook
     makeBinaryWrapper
-    wrapGAppsHook
+    wrapGAppsHook3
   ];
 
   buildInputs = [
@@ -98,7 +98,7 @@ stdenv.mkDerivation (finalAttrs: {
   '';
 
   meta = with lib; {
-    description = "The secure french messenger";
+    description = "Secure french messenger";
     homepage = "https://www.olvid.io";
     license = licenses.agpl3Only;
     mainProgram = "olvid";