maintainers/maintainer-list.nix +5 −0

@@ -12443,6 +12443,11 @@
     githubId = 18661391;
     name = "Malte Janz";
   };
+  malteneuss = {
+    github = "malteneuss";
+    githubId = 5301202;
+    name = "Malte Neuss";
+  };
   malte-v = {
     email = "nixpkgs@mal.tc";
     github = "malte-v";
nixos/doc/manual/release-notes/rl-2405.section.md +2 −0

@@ -139,6 +139,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 - [ollama](https://ollama.ai), server for running large language models locally.

+- [nextjs-ollama-llm-ui](https://github.com/jakobhoeg/nextjs-ollama-llm-ui), light-weight frontend server to chat with Ollama models through a web app.
+
 - [ownCloud Infinite Scale Stack](https://owncloud.com/infinite-scale-4-0/), a modern and scalable rewrite of ownCloud.

 - [PhotonVision](https://photonvision.org/), a free, fast, and easy-to-use computer vision solution for the FIRST® Robotics Competition.
nixos/modules/module-list.nix +1 −0

@@ -1399,6 +1399,7 @@
   ./services/web-apps/netbox.nix
   ./services/web-apps/nextcloud.nix
   ./services/web-apps/nextcloud-notify_push.nix
+  ./services/web-apps/nextjs-ollama-llm-ui.nix
   ./services/web-apps/nexus.nix
   ./services/web-apps/nifi.nix
   ./services/web-apps/node-red.nix
nixos/modules/services/web-apps/nextjs-ollama-llm-ui.nix (new file, +87 −0)

{ config, pkgs, lib, ... }:
let
  cfg = config.services.nextjs-ollama-llm-ui;
  # We have to override the Ollama URL here, because it gets baked into the
  # web app at build time.
  nextjs-ollama-llm-ui = cfg.package.override { ollamaUrl = cfg.ollamaUrl; };
in
{
  options = {
    services.nextjs-ollama-llm-ui = {
      enable = lib.mkEnableOption ''
        Simple Ollama web UI service; an easy-to-use web frontend for an
        Ollama backend service. Run state-of-the-art AI large language models
        (LLMs) similar to ChatGPT locally, with privacy, on your personal
        computer. This service is stateless and doesn't store any data on the
        server; all data is kept locally in your web browser.
        See https://github.com/jakobhoeg/nextjs-ollama-llm-ui.

        Required: You need an Ollama backend service running, and
        "services.nextjs-ollama-llm-ui.ollamaUrl" must point to its URL.
        You can host such a backend service with NixOS through "services.ollama".
      '';

      package = lib.mkPackageOption pkgs "nextjs-ollama-llm-ui" { };

      hostname = lib.mkOption {
        type = lib.types.str;
        default = "127.0.0.1";
        example = "ui.example.org";
        description = ''
          The hostname under which the Ollama UI interface should be accessible.
          By default it uses localhost/127.0.0.1 to be accessible only from the
          local machine. Change to "0.0.0.0" to make it directly accessible from
          the local network.

          Note: You should keep it at 127.0.0.1 and only serve to the local
          network or internet from a (home) server behind a reverse proxy with
          TLS encryption. See https://wiki.nixos.org/wiki/Nginx for instructions
          on how to set up a reverse proxy.
        '';
      };

      port = lib.mkOption {
        type = lib.types.port;
        default = 3000;
        example = 3000;
        description = ''
          The port under which the Ollama UI interface should be accessible.
        '';
      };

      ollamaUrl = lib.mkOption {
        type = lib.types.str;
        default = "127.0.0.1:11434";
        example = "https://ollama.example.org";
        description = ''
          The address (including host and port) under which we can access the
          Ollama backend server.

          Note: If the UI service is running under a domain such as
          "https://ui.example.org", the Ollama backend service must allow CORS
          requests from this domain, e.g. by adding
          "services.ollama.environment.OLLAMA_ORIGINS = [ ... "https://ui.example.org" ];".
        '';
      };
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services = {
      nextjs-ollama-llm-ui = {
        wantedBy = [ "multi-user.target" ];
        description = "NextJS Ollama LLM UI";
        after = [ "network.target" ];
        environment = {
          HOSTNAME = cfg.hostname;
          PORT = toString cfg.port;
          NEXT_PUBLIC_OLLAMA_URL = cfg.ollamaUrl;
        };
        serviceConfig = {
          ExecStart = "${lib.getExe nextjs-ollama-llm-ui}";
          DynamicUser = true;
        };
      };
    };
  };

  meta.maintainers = with lib.maintainers; [ malteneuss ];
}
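For illustration, a minimal configuration sketch wiring the UI to a local Ollama backend. The domain names are placeholders, and the OLLAMA_ORIGINS form follows the option description quoted above; the exact option path and value type may differ between NixOS releases.

{
  # Ollama backend; serves the models the UI talks to.
  services.ollama = {
    enable = true;
    # Allow cross-origin requests from the domain the UI is served under,
    # since the browser calls Ollama directly (see the CORS note above).
    environment.OLLAMA_ORIGINS = [ "https://ui.example.org" ];
  };

  # The web UI itself; stateless, all chat data stays in the browser.
  services.nextjs-ollama-llm-ui = {
    enable = true;
    port = 3000;
    # Must be reachable from the user's browser, not only from this host.
    ollamaUrl = "https://ollama.example.org";
  };
}

As the hostname option recommends, keep the UI bound to 127.0.0.1 and expose it through a TLS-terminating reverse proxy such as nginx.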
nixos/tests/all-tests.nix +1 −0

@@ -616,6 +616,7 @@ in {
   # TODO: put in networking.nix after the test becomes more complete
   networkingProxy = handleTest ./networking-proxy.nix {};
   nextcloud = handleTest ./nextcloud {};
+  nextjs-ollama-llm-ui = runTest ./web-apps/nextjs-ollama-llm-ui.nix;
   nexus = handleTest ./nexus.nix {};
   # TODO: Test nfsv3 + Kerberos
   nfs3 = handleTest ./nfs { version = 3; };
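The test file registered here, ./web-apps/nextjs-ollama-llm-ui.nix, is not shown in this diff. As a rough sketch of what such a runTest-style NixOS VM test typically looks like (the body below is an assumption, not the PR's actual test):

{ lib, ... }:
{
  name = "nextjs-ollama-llm-ui";
  meta.maintainers = with lib.maintainers; [ malteneuss ];

  nodes.machine = {
    services.nextjs-ollama-llm-ui = {
      enable = true;
      port = 8080;
    };
  };

  testScript = ''
    machine.wait_for_unit("nextjs-ollama-llm-ui.service")
    machine.wait_for_open_port(8080)
    # The UI is stateless and should serve its start page even without
    # a reachable Ollama backend.
    machine.succeed("curl --fail http://127.0.0.1:8080")
  '';
}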