Commit 3084908a authored by Ihar Hrachyshka

llama-cpp: remove workaround for local-ai

The `local-ai` package is marked broken. It uses `overrideAttrs` to override
the `llama-cpp` package, which is discouraged and puts a maintenance burden
on the `llama-cpp` maintainers.

If and when the `local-ai` package is fixed, it should stop using
`overrideAttrs` on `llama-cpp`. Instead, if it still has to build against a
non-standard `llama-cpp` revision, it should maintain its own `llama-cpp`
derivation.
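
For illustration, here is a minimal sketch of the two patterns contrasted above. Everything in it is hypothetical: the pinned revision and hash are placeholders, the attribute names are invented, and `llama-cpp`, `fetchFromGitHub`, `stdenv`, `lib`, `cmake`, and `ninja` are assumed to be in scope (e.g. as `callPackage` arguments).

let
  # Discouraged: overrideAttrs on the nixpkgs llama-cpp. The substituted
  # source must keep working with whatever patches and hook phases the
  # nixpkgs expression defines, which its maintainers only test against
  # their own pin.
  llama-cpp-overridden = llama-cpp.overrideAttrs (prev: {
    src = fetchFromGitHub {
      owner = "ggerganov";
      repo = "llama.cpp";
      rev = "0000000";      # hypothetical pinned revision
      hash = lib.fakeHash;  # placeholder hash
    };
  });

  # Suggested instead: a derivation owned by the downstream package, so
  # the pin does not depend on the internals of the nixpkgs expression.
  llama-cpp-own = stdenv.mkDerivation {
    pname = "llama-cpp-for-local-ai";  # hypothetical name
    version = "0-unstable";
    src = fetchFromGitHub {
      owner = "ggerganov";
      repo = "llama.cpp";
      rev = "0000000";      # same hypothetical pin
      hash = lib.fakeHash;
    };
    nativeBuildInputs = [ cmake ninja ];
    # Build phases would be maintained here, alongside local-ai itself.
  };
in
llama-cpp-own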
parent e36a463d
+0 −13
@@ -91,19 +91,6 @@ effectiveStdenv.mkDerivation (finalAttrs: {
 
   patches = lib.optionals vulkanSupport [ ./disable_bfloat16.patch ];
 
-  postPatch = ''
-    # Workaround for local-ai package which overrides this package to an older llama-cpp
-    if [ -f ./ggml/src/ggml-metal.m ]; then
-      substituteInPlace ./ggml/src/ggml-metal.m \
-        --replace-fail '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
-    fi
-
-    if [ -f ./ggml/src/ggml-metal/ggml-metal.m ]; then
-      substituteInPlace ./ggml/src/ggml-metal/ggml-metal.m \
-        --replace-fail '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
-    fi
-  '';
-
   nativeBuildInputs = [
     cmake
     ninja
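
For context: the removed hook rewrote llama.cpp's hard-coded `[bundle pathForResource:@"ggml-metal" ofType:@"metal"]` lookup so that the Metal shader is loaded from the package's store path. The two guarded branches existed only because `ggml-metal.m` sits at different paths in the revision packaged here and in the older revision pinned by `local-ai`, which is exactly the cross-version burden described in the commit message.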
+1 −1
@@ -112,7 +112,7 @@ let
           hash = "sha256-b9B5I3EbBFrkWc6RLXMWcCRKayyWjlGuQrogUcrISrc=";
           fetchSubmodules = true;
         };
-        postPatch = prev.postPatch + ''
+        postPatch = ''
           cd examples
           cp -r --no-preserve=mode ${src}/backend/cpp/llama grpc-server
           cp llava/clip* llava/llava.* grpc-server
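
Note the knock-on change: with `postPatch` removed from `llama-cpp` itself, `prev.postPatch` no longer exists in the attribute set that `overrideAttrs` passes to the override function, so the `local-ai` expression now has to define its `postPatch` from scratch.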