about summary refs log tree commit diff
path: root/nixpkgs/pkgs/tools/audio/openai-whisper-cpp/default.nix
blob: 20d2a23d1dd58f8e23f681a9cf3a5c3a7535fb19 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
{ lib
, stdenv
, fetchFromGitHub
, SDL2
, makeWrapper
, wget
, which
, Accelerate
, CoreGraphics
, CoreML
, CoreVideo
, MetalKit

, config
, cudaSupport ? config.cudaSupport
, cudaPackages ? {}
}:

let
  # CUDA builds must use cudaPackages.backendStdenv consistently: CUDA caps
  # the usable gcc version (e.g. cudaPackages_11 tops out at gcc11), and
  # mixing stdenvs causes libstdc++ mismatches in downstream consumers.
  effectiveStdenv = if cudaSupport then cudaPackages.backendStdenv else stdenv;
in
effectiveStdenv.mkDerivation (finalAttrs: {
  pname = "whisper-cpp";
  version = "1.5.4";

  src = fetchFromGitHub {
    owner = "ggerganov";
    repo = "whisper.cpp";
    rev = "refs/tags/v${finalAttrs.version}";
    hash = "sha256-9H2Mlua5zx2WNXbz2C5foxIteuBgeCNALdq5bWyhQCk=";
  };

  # Upstream's model-download script writes next to its own location, which is
  # read-only inside the nix store; the patch makes it download into the
  # caller's current working directory instead.
  patches = [ ./download-models.patch ];

  nativeBuildInputs = [
    which
    makeWrapper
  ] ++ lib.optionals cudaSupport (with cudaPackages; [
    cuda_nvcc
    autoAddDriverRunpath
  ]);

  buildInputs = [
    SDL2
  ] ++ lib.optionals stdenv.isDarwin [
    Accelerate
    CoreGraphics
    CoreML
    CoreVideo
    MetalKit
  ] ++ lib.optionals cudaSupport (with cudaPackages; [
    # Temporary closure-size reduction hack; remove once cudaPackages stop
    # using lndir: https://github.com/NixOS/nixpkgs/issues/271792
    cuda_cccl.dev # provides nv/target
    cuda_cudart.dev
    cuda_cudart.lib
    cuda_cudart.static
    libcublas.dev
    libcublas.lib
    libcublas.static
  ]);

  # Add the CUDA driver stub library to the link line so `-lcuda` resolves
  # without a real driver being present in the build sandbox.
  postPatch = lib.optionalString cudaSupport ''
    substituteInPlace Makefile \
      --replace '-lcuda ' '-lcuda -L${cudaPackages.cuda_cudart.lib}/lib/stubs '
  '';

  env =
    lib.optionalAttrs stdenv.isDarwin {
      WHISPER_COREML = "1";
      WHISPER_COREML_ALLOW_FALLBACK = "1";
    }
    // lib.optionalAttrs cudaSupport {
      WHISPER_CUBLAS = "1";
    };

  makeFlags = [ "main" "stream" "command" ];

  installPhase = ''
    runHook preInstall

    mkdir -p $out/bin
    cp ./main $out/bin/whisper-cpp
    cp ./stream $out/bin/whisper-cpp-stream
    cp ./command $out/bin/whisper-cpp-command

    cp models/download-ggml-model.sh $out/bin/whisper-cpp-download-ggml-model

    wrapProgram $out/bin/whisper-cpp-download-ggml-model \
      --prefix PATH : ${lib.makeBinPath [wget]}

    ${lib.optionalString stdenv.isDarwin ''
      install -Dt $out/share/whisper-cpp ggml-metal.metal

      for bin in whisper-cpp whisper-cpp-stream whisper-cpp-command; do
        wrapProgram $out/bin/$bin \
          --set-default GGML_METAL_PATH_RESOURCES $out/share/whisper-cpp
      done
    ''}

    runHook postInstall
  '';

  meta = {
    description = "Port of OpenAI's Whisper model in C/C++";
    longDescription = ''
      To download the models as described in the project's readme, you may
      use the `whisper-cpp-download-ggml-model` binary from this package.
    '';
    homepage = "https://github.com/ggerganov/whisper.cpp";
    license = lib.licenses.mit;
    platforms = lib.platforms.all;
    maintainers = with lib.maintainers; [ dit7ya hughobrien ];
  };
})