Unverified Commit 63ba1040 authored by github-actions[bot]'s avatar github-actions[bot] Committed by GitHub
Browse files

Merge master into staging-next

parents 3fc613c5 79aa627b
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -265,6 +265,8 @@ The module update takes care of the new config syntax and the data itself (user

- The `cawbird` package is dropped from nixpkgs, as it got broken by the Twitter API closing down and has been abandoned upstream.

- `hardware.nvidia` gained `datacenter` options for enabling NVIDIA Data Center drivers and configuration of NVLink/NVSwitch topologies through `nv-fabricmanager`.

- Certificate generation via the `security.acme` module now limits the concurrent number of running certificate renewals and generation jobs, to avoid spiking resource usage when processing many certificates at once. The limit defaults to *5* and can be adjusted via `maxConcurrentRenewals`. Setting it to *0* disables the limits altogether.

- The new `boot.bcache.enable` option (enabled by default) allows completely removing `bcache` mount support.
+328 −222
Original line number Diff line number Diff line
@@ -4,8 +4,10 @@
  pkgs,
  ...
}: let
  x11Enabled = config.services.xserver.enable
               && (lib.elem "nvidia" config.services.xserver.videoDrivers);
  nvidia_x11 =
    if (lib.elem "nvidia" config.services.xserver.videoDrivers)
    if  x11Enabled || cfg.datacenter.enable
    then cfg.package
    else null;

@@ -18,9 +20,64 @@
  primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
  busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
  ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
  settingsFormat = pkgs.formats.keyValue {};
in {
  options = {
    hardware.nvidia = {
      datacenter.enable = lib.mkEnableOption (lib.mdDoc ''
        Data Center drivers for NVIDIA cards on a NVLink topology.
      '');
      datacenter.settings = lib.mkOption {
        type = settingsFormat.type;
        default = {
          LOG_LEVEL=4;
          LOG_FILE_NAME="/var/log/fabricmanager.log";
          LOG_APPEND_TO_LOG=1;
          LOG_FILE_MAX_SIZE=1024;
          LOG_USE_SYSLOG=0;
          DAEMONIZE=1;
          BIND_INTERFACE_IP="127.0.0.1";
          STARTING_TCP_PORT=16000;
          FABRIC_MODE=0;
          FABRIC_MODE_RESTART=0;
          STATE_FILE_NAME="/var/tmp/fabricmanager.state";
          FM_CMD_BIND_INTERFACE="127.0.0.1";
          FM_CMD_PORT_NUMBER=6666;
          FM_STAY_RESIDENT_ON_FAILURES=0;
          ACCESS_LINK_FAILURE_MODE=0;
          TRUNK_LINK_FAILURE_MODE=0;
          NVSWITCH_FAILURE_MODE=0;
          ABORT_CUDA_JOBS_ON_FM_EXIT=1;
          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
        };
        defaultText = lib.literalExpression ''
        {
          LOG_LEVEL=4;
          LOG_FILE_NAME="/var/log/fabricmanager.log";
          LOG_APPEND_TO_LOG=1;
          LOG_FILE_MAX_SIZE=1024;
          LOG_USE_SYSLOG=0;
          DAEMONIZE=1;
          BIND_INTERFACE_IP="127.0.0.1";
          STARTING_TCP_PORT=16000;
          FABRIC_MODE=0;
          FABRIC_MODE_RESTART=0;
          STATE_FILE_NAME="/var/tmp/fabricmanager.state";
          FM_CMD_BIND_INTERFACE="127.0.0.1";
          FM_CMD_PORT_NUMBER=6666;
          FM_STAY_RESIDENT_ON_FAILURES=0;
          ACCESS_LINK_FAILURE_MODE=0;
          TRUNK_LINK_FAILURE_MODE=0;
          NVSWITCH_FAILURE_MODE=0;
          ABORT_CUDA_JOBS_ON_FM_EXIT=1;
          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
        }
        '';
        description = lib.mdDoc ''
          Additional configuration options for fabricmanager.
        '';
      };

      powerManagement.enable = lib.mkEnableOption (lib.mdDoc ''
        experimental power management through systemd. For more information, see
        the NVIDIA docs, on Chapter 21. Configuring Power Management Support.
@@ -167,9 +224,15 @@ in {
        It also drastically increases the time the driver needs to clock down after load.
      '');

      package = lib.mkPackageOptionMD config.boot.kernelPackages.nvidiaPackages "nvidia_x11" {
        default = "stable";
      package = lib.mkOption {
        default = config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
        defaultText = lib.literalExpression ''
          config.boot.kernelPackages.nvidiaPackages."\$\{if cfg.datacenter.enable then "dc" else "stable"}"
        '';
        example = lib.mdDoc "config.boot.kernelPackages.nvidiaPackages.legacy_470";
        description = lib.mdDoc ''
          The NVIDIA driver package to use.
        '';
      };

      open = lib.mkEnableOption (lib.mdDoc ''
@@ -188,7 +251,45 @@ in {
      then pCfg.intelBusId
      else pCfg.amdgpuBusId;
  in
    lib.mkIf (nvidia_x11 != null) {
    lib.mkIf (nvidia_x11 != null) (lib.mkMerge [
      # Common
      ({
        assertions = [
          {
            assertion = !(x11Enabled && cfg.datacenter.enable);
            message = "You cannot configure both X11 and Data Center drivers at the same time.";
          }
        ];
        boot = {
          blacklistedKernelModules = ["nouveau" "nvidiafb"];
          kernelModules = [ "nvidia-uvm" ];
        };
        systemd.tmpfiles.rules =
          lib.optional config.virtualisation.docker.enableNvidia
            "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin";
        services.udev.extraRules =
        ''
          # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 255'"
          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \  -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) $${i}; done'"
          KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 254'"
          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 1'"
        '';
        hardware.opengl = {
          extraPackages = [
            nvidia_x11.out
          ];
          extraPackages32 = [
            nvidia_x11.lib32
          ];
        };
        environment.systemPackages = [
          nvidia_x11.bin
        ];
      })
      # X11
      (lib.mkIf x11Enabled {
        assertions = [
        {
          assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
@@ -248,8 +349,7 @@ in {
        {
          assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
          message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01";
        }
      ];
        }];

        # If Optimus/PRIME is enabled, we:
        # - Specify the configured NVIDIA GPU bus ID in the Device section for the
@@ -345,17 +445,14 @@ in {

        hardware.opengl = {
          extraPackages = [
          nvidia_x11.out
            pkgs.nvidia-vaapi-driver
          ];
          extraPackages32 = [
          nvidia_x11.lib32
            pkgs.pkgsi686Linux.nvidia-vaapi-driver
          ];
        };
        environment.systemPackages =
        [nvidia_x11.bin]
        ++ lib.optional cfg.nvidiaSettings nvidia_x11.settings
          lib.optional cfg.nvidiaSettings nvidia_x11.settings
          ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
          ++ lib.optional offloadCfg.enableOffloadCmd
          (pkgs.writeShellScriptBin "nvidia-offload" ''
@@ -420,7 +517,6 @@ in {
              };
            })
          ];

        services.acpid.enable = true;

        services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
@@ -428,23 +524,17 @@ in {
        hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;

        systemd.tmpfiles.rules =
        lib.optional config.virtualisation.docker.enableNvidia
        "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
        ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
          lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
          "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";

        boot = {
        blacklistedKernelModules = ["nouveau" "nvidiafb"];

          extraModulePackages =
            if cfg.open
            then [nvidia_x11.open]
            else [nvidia_x11.bin];

          # nvidia-uvm is required by CUDA applications.
          kernelModules =
          ["nvidia-uvm"]
          ++ lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];
            lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];

          # If requested enable modesetting via kernel parameter.
          kernelParams =
@@ -458,17 +548,8 @@ in {
            options nvidia "NVreg_DynamicPowerManagement=0x02"
          '';
        };

        services.udev.extraRules =
        ''
          # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 255'"
          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \  -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) $${i}; done'"
          KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 254'"
          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 1'"
        ''
        + lib.optionalString cfg.powerManagement.finegrained (
          lib.optionalString cfg.powerManagement.finegrained (
          lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
            # Remove NVIDIA USB xHCI Host Controller devices, if present
            ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
@@ -489,5 +570,30 @@ in {
            ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
          ''
        );
      })
      # Data Center
      (lib.mkIf (cfg.datacenter.enable) {
        boot.extraModulePackages = [
          nvidia_x11.bin
        ];
        systemd.services.nvidia-fabricmanager = {
          enable = true;
          description = "Start NVIDIA NVLink Management";
          wantedBy = [ "multi-user.target" ];
          unitConfig.After = [ "network-online.target" ];
          unitConfig.Requires = [ "network-online.target" ];
          serviceConfig = {
            Type = "forking";
            TimeoutStartSec = 240;
            ExecStart = let
              nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
              in
                nvidia_x11.fabricmanager + "/bin/nv-fabricmanager -c " + nv-fab-conf;
            LimitCORE="infinity";
          };
        };
        environment.systemPackages =
          lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager;
      })
    ]);
}
+1 −0
Original line number Diff line number Diff line
@@ -1250,6 +1250,7 @@
  ./services/web-apps/matomo.nix
  ./services/web-apps/mattermost.nix
  ./services/web-apps/mediawiki.nix
  ./services/web-apps/meme-bingo-web.nix
  ./services/web-apps/miniflux.nix
  ./services/web-apps/monica.nix
  ./services/web-apps/moodle.nix
+93 −0
Original line number Diff line number Diff line
{ config, lib, pkgs, ... }:

let
  inherit (lib) mkEnableOption mkIf mkOption mdDoc types literalExpression;

  cfg = config.services.meme-bingo-web;
in {
  options = {
    services.meme-bingo-web = {
      enable = mkEnableOption (mdDoc ''
        A web app for the meme bingo, rendered entirely on the web server and made interactive with forms.

        Note: The application's author recommends running meme-bingo-web behind a reverse proxy for SSL and HTTP/3.
      '');

      package = mkOption {
        type = types.package;
        default = pkgs.meme-bingo-web;
        defaultText = literalExpression "pkgs.meme-bingo-web";
        description = mdDoc "meme-bingo-web package to use.";
      };

      baseUrl = mkOption {
        description = mdDoc ''
          URL to be used for the HTML <base> element on all HTML routes.
        '';
        type = types.str;
        default = "http://localhost:41678/";
        example = "https://bingo.example.com/";
      };

      port = mkOption {
        description = mdDoc ''
          Port to be used for the web server.
        '';
        type = types.port;
        default = 41678;
        example = 21035;
      };
    };
  };

  config = mkIf cfg.enable {
    systemd.services.meme-bingo-web = {
      description = "A web app for playing meme bingos.";
      wantedBy = [ "multi-user.target" ];

      # The application is configured entirely through environment variables.
      environment = {
        MEME_BINGO_BASE = cfg.baseUrl;
        MEME_BINGO_PORT = toString cfg.port;
      };
      path = [ cfg.package ];

      serviceConfig = {
        # DynamicUser allocates the user/group at service start; the names
        # below only label the transient account.
        User = "meme-bingo-web";
        Group = "meme-bingo-web";

        DynamicUser = true;

        ExecStart = "${cfg.package}/bin/meme-bingo-web";

        # Keep the service running; back off one second between restarts.
        Restart = "always";
        RestartSec = 1;

        # Hardening: the service only needs to serve HTTP over IPv4/IPv6,
        # so drop capabilities, device access, and kernel interfaces.
        CapabilityBoundingSet = [ "" ];
        DeviceAllow = [ "/dev/random" ];
        LockPersonality = true;
        PrivateDevices = true;
        PrivateUsers = true;
        ProcSubset = "pid";
        ProtectSystem = "strict";
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectProc = "invisible";
        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
        RestrictNamespaces = true;
        RestrictRealtime = true;
        SystemCallArchitectures = "native";
        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
        UMask = "0077";
        RestrictSUIDSGID = true;
        RemoveIPC = true;
        NoNewPrivileges = true;
        MemoryDenyWriteExecute = true;
      };
    };
  };
}
+1 −1
Original line number Diff line number Diff line
@@ -9,7 +9,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
      { pkgs, ... }:

      {
        services.postgresql = let mypg = pkgs.postgresql_11; in {
        services.postgresql = let mypg = pkgs.postgresql; in {
            enable = true;
            package = mypg;
            extraPlugins = with mypg.pkgs; [
Loading