Commit dfef8af6 (unverified), authored by Pol Dellaiera, committed by GitHub

Merge pull request #326939 from abysssol/ollama-split-test

nixos/ollama: split cuda and rocm from service test
Parents: e133d5e8, aa8e0258
nixos/tests/all-tests.nix (+3 −1)
@@ -687,7 +687,9 @@ in {
   ocis = handleTest ./ocis.nix {};
   oddjobd = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./oddjobd.nix {};
   oh-my-zsh = handleTest ./oh-my-zsh.nix {};
-  ollama = handleTest ./ollama.nix {};
+  ollama = runTest ./ollama.nix;
+  ollama-cuda = runTestOn ["x86_64-linux" "aarch64-linux"] ./ollama-cuda.nix;
+  ollama-rocm = runTestOn ["x86_64-linux" "aarch64-linux"] ./ollama-rocm.nix;
   ombi = handleTest ./ombi.nix {};
   openarena = handleTest ./openarena.nix {};
   openldap = handleTest ./openldap.nix {};
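For context: handleTest wraps the older make-test-python.nix-style tests, while runTest takes a test module file directly, and runTestOn additionally gates the test on the listed platforms. A minimal sketch of that gating, as a hypothetical simplification of the helpers defined near the top of all-tests.nix (the real definitions differ in detail):

# Hypothetical simplification, not the actual helper: the test only
# evaluates when the current system appears in the allowed list,
# otherwise an empty attribute set is returned.
runTestOn =
  systems: path:
  lib.optionalAttrs (builtins.elem stdenv.hostPlatform.system systems) (runTest path);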
nixos/tests/ollama-cuda.nix (new file, +17 −0)
{ lib, ... }:
{
  name = "ollama-cuda";
  meta.maintainers = with lib.maintainers; [ abysssol ];

  nodes.cuda =
    { ... }:
    {
      services.ollama.enable = true;
      services.ollama.acceleration = "cuda";
    };

  testScript = ''
    cuda.wait_for_unit("multi-user.target")
    cuda.wait_for_open_port(11434)
  '';
}
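Note that the split-out GPU tests only assert that the service starts and listens on the default port, presumably because the VM test driver has no GPU to exercise actual inference. As a hypothetical extension (not part of this commit), the script could also poke the HTTP API through a CPU-safe endpoint; /api/tags is a real Ollama endpoint that just lists installed models:

  # Hypothetical extension: confirm the API answers without requiring
  # CUDA inference to succeed.
  testScript = ''
    cuda.wait_for_unit("multi-user.target")
    cuda.wait_for_open_port(11434)
    cuda.succeed("curl --fail http://127.0.0.1:11434/api/tags")
  '';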
nixos/tests/ollama-rocm.nix (new file, +17 −0)
{ lib, ... }:
{
  name = "ollama-rocm";
  meta.maintainers = with lib.maintainers; [ abysssol ];

  nodes.rocm =
    { ... }:
    {
      services.ollama.enable = true;
      services.ollama.acceleration = "rocm";
    };

  testScript = ''
    rocm.wait_for_unit("multi-user.target")
    rocm.wait_for_open_port(11434)
  '';
}
nixos/tests/ollama.nix (+38 −41)
-import ./make-test-python.nix ({ pkgs, lib, ... }:
+{ lib, ... }:
 let
   mainPort = 11434;
   altPort = 11435;
-
-  curlRequest = port: request:
-    "curl http://127.0.0.1:${toString port}/api/generate -d '${builtins.toJSON request}'";
-
-  prompt = {
-    model = "tinydolphin";
-    prompt = "lorem ipsum";
-    options = {
-      seed = 69;
-      temperature = 0;
-    };
-  };
 in
 {
   name = "ollama";
-  meta = with lib.maintainers; {
-    maintainers = [ abysssol ];
-  };
+  meta.maintainers = with lib.maintainers; [ abysssol ];
 
   nodes = {
-    cpu = { ... }: {
-      services.ollama.enable = true;
-    };
-
-    rocm = { ... }: {
-      services.ollama.enable = true;
-      services.ollama.acceleration = "rocm";
-    };
-
-    cuda = { ... }: {
-      services.ollama.enable = true;
-      services.ollama.acceleration = "cuda";
-    };
+    cpu =
+      { ... }:
+      {
+        services.ollama.enable = true;
+      };
 
-    altAddress = { ... }: {
-      services.ollama.enable = true;
-      services.ollama.port = altPort;
-    };
+    altAddress =
+      { ... }:
+      {
+        services.ollama.enable = true;
+        services.ollama.port = altPort;
+      };
   };
 
   testScript = ''
-    vms = [ cpu, rocm, cuda, altAddress ];
+    import json
 
-    start_all()
-    for vm in vms:
-        vm.wait_for_unit("multi-user.target")
+    def curl_request_ollama(prompt, port):
+      json_prompt = json.dumps(prompt)
+      return f"""curl http://127.0.0.1:{port}/api/generate -d '{json_prompt}'"""
 
-    stdout = cpu.succeed("""${curlRequest mainPort prompt}""", timeout=100)
+    prompt = {
+      "model": "tinydolphin",
+      "prompt": "lorem ipsum",
+      "options": {
+        "seed": 69,
+        "temperature": 0,
+      },
+    }
 
-    stdout = altAddress.succeed("""${curlRequest altPort prompt}""", timeout=100)
+    vms = [
+      (cpu, ${toString mainPort}),
+      (altAddress, ${toString altPort}),
+    ]
+
+    start_all()
+    for (vm, port) in vms:
+      vm.wait_for_unit("multi-user.target")
+      vm.wait_for_open_port(port)
+      stdout = vm.succeed(curl_request_ollama(prompt, port), timeout = 100)
   '';
-})
+}
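A design choice worth noting in this rewrite: JSON construction moves from Nix (builtins.toJSON interpolated into a single-quoted shell string) into Python's json.dumps, which sidesteps shell-quoting pitfalls, and each node is paired with its port so one loop drives both VMs. The only values Nix still splices into the Python source are the port numbers, via ${toString ...}. A minimal standalone sketch of that splicing technique, runnable with nix-instantiate --eval:

# toString splices the Nix integer into the embedded Python source as a
# literal, so this evaluates to the string "vms = [ (cpu, 11434) ]\n".
let
  mainPort = 11434;
in
''
  vms = [ (cpu, ${toString mainPort}) ]
''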
ollama package (+2 −0)
@@ -214,6 +214,8 @@ goBuild ((lib.optionalAttrs enableRocm {
     };
   } // lib.optionalAttrs stdenv.isLinux {
     inherit ollama-rocm ollama-cuda;
+    service-cuda = nixosTests.ollama-cuda;
+    service-rocm = nixosTests.ollama-rocm;
   };
 
   meta = {
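With these entries wired into passthru.tests, the split service tests are reachable from the package itself, so tooling that walks a package's tests (e.g. ofborg) picks them up on pull requests touching ollama. An evaluation sketch listing the exposed hooks, assuming a nixpkgs checkout (run with nix-instantiate --eval --strict; exact names depend on platform, but on Linux the list should include service-cuda and service-rocm alongside ollama-cuda and ollama-rocm):

# List the test attributes the ollama package exposes via passthru.tests.
with import <nixpkgs> { };
builtins.attrNames ollama.tests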