Unverified Commit fb4ce19a authored by nixpkgs-ci[bot]'s avatar nixpkgs-ci[bot] Committed by GitHub
Browse files

Merge master into staging-nixos

parents 75b1dc85 7be92c53
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -13436,6 +13436,12 @@
    githubId = 12773748;
    matrix = "@j.r:chaos.jetzt";
  };
  jujb233 = {
    name = "jujb233";
    email = "j3207068746@163.com";
    github = "jujb233";
    githubId = 191588056;
  };
  jukremer = {
    email = "nixpkgs@jankremer.eu";
    github = "jukremer";
+3 −0
Original line number Diff line number Diff line
@@ -245,6 +245,9 @@ See <https://github.com/NixOS/nixpkgs/issues/481673>.

- `programs.light` was removed from nixpkgs due to the corresponding package being unmaintained upstream. `brightnessctl` and `programs.acpilight` offer replacements.

- `ceph` has been upgraded to v20. See the [Ceph "tentacle" release notes](https://docs.ceph.com/en/latest/releases/tentacle/#v20-2-0-tentacle) for details and recommended upgrade procedure.
  Note that **upgrades of server-side components are one-way**: downgrading e.g. an OSD from *Tentacle* to *Squid* is not only unsupported but is known to break.

- The `networking.wireless` module has been security hardened by default: the `wpa_supplicant` daemon now runs under an unprivileged user with restricted access to the system.

  As part of these changes, `/etc/wpa_supplicant.conf` has been deprecated: the NixOS-generated configuration file is now linked to `/etc/wpa_supplicant/nixos.conf` and `/etc/wpa_supplicant/imperative.conf` has been added for imperatively configuring `wpa_supplicant` or when using [allowAuxiliaryImperativeNetworks](#opt-networking.wireless.allowAuxiliaryImperativeNetworks).
+44 −0
Original line number Diff line number Diff line
@@ -43,6 +43,7 @@ let
    { pkgs, ... }:
    {
      virtualisation = {
        memorySize = 2048;
        emptyDiskImages = [
          20480
          20480
@@ -92,6 +93,10 @@ let
          cfg.osd2.name
        ];
      };
      rgw = {
        enable = true;
        daemons = [ cfg.monA.name ];
      };
    };
  };

@@ -100,6 +105,8 @@ let
  # For other ways to deploy a ceph cluster, look at the documentation at
  # https://docs.ceph.com/docs/master/
  testScript = ''
    import json

    start_all()

    monA.wait_for_unit("network.target")
@@ -194,6 +201,16 @@ let
        "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
    )

    # Bootstrap RGW
    monA.succeed(
        "sudo -u ceph mkdir -p /var/lib/ceph/radosgw/ceph-${cfg.monA.name}",
        "ceph auth get-or-create client.${cfg.monA.name} osd 'allow rwx' mon 'allow rw' > /var/lib/ceph/radosgw/ceph-${cfg.monA.name}/keyring",
        "chown ceph:ceph /var/lib/ceph/radosgw/ceph-${cfg.monA.name}/keyring",
        "systemctl start ceph-rgw-${cfg.monA.name}",
    )
    monA.wait_for_unit("ceph-rgw-${cfg.monA.name}")
    monA.wait_for_open_port(7480)

    # Shut down ceph by stopping ceph.target.
    monA.succeed("systemctl stop ceph.target")

@@ -204,6 +221,7 @@ let
    monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
    monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
    monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")
    monA.wait_for_unit("ceph-rgw-${cfg.monA.name}")

    # Ensure the cluster comes back up again
    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
@@ -222,6 +240,32 @@ let
    monA.wait_for_open_port(8080)
    monA.wait_until_succeeds("curl -q --fail http://localhost:8080")
    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")

    # Initialize dashboard creds
    monA.succeed(
        "echo 'foo bar baz qux' > /tmp/dashboard_pw",
        "ceph dashboard ac-user-create admin -i /tmp/dashboard_pw administrator",
        "ceph dashboard set-rgw-credentials",
    )

    # Get dashboard auth token
    auth_payload = json.dumps({"username": "admin", "password": "foo bar baz qux"})
    auth_response = json.loads(monA.succeed(
        f"curl --fail -s -X POST -H 'Accept: application/vnd.ceph.api.v1.0+json' -H 'Content-Type: application/json' -d '{auth_payload}' http://localhost:8080/api/auth",
    ))
    token = auth_response["token"]

    # Check cluster health via dashboard API
    health = json.loads(monA.succeed(
        f"curl --fail -s -H 'Accept: application/vnd.ceph.api.v1.0+json' -H 'Authorization: Bearer {token}' http://localhost:8080/api/health/minimal",
    ))
    assert health["health"]["status"] == "HEALTH_OK"

    # List daemons via REST API
    rgw_daemons = json.loads(monA.succeed(
        f"curl --fail -s -H 'Accept: application/vnd.ceph.api.v1.0+json' -H 'Authorization: Bearer {token}' http://localhost:8080/api/rgw/daemon",
    ))
    assert rgw_daemons[0]["id"] == "a"
  '';
in
{
+13 −0
Original line number Diff line number Diff line
@@ -16426,6 +16426,19 @@ final: prev: {
    meta.hydraPlatforms = [ ];
  };
  tokyodark-nvim = buildVimPlugin {
    pname = "tokyodark.nvim";
    version = "0-unstable-2025-11-13";
    src = fetchFromGitHub {
      owner = "tiagovla";
      repo = "tokyodark.nvim";
      rev = "659aff3c73dc2e0159314050a81671f0b2eaad01";
      hash = "sha256-THvvevUwK3p/aZW+FI2RNnduqWBcmWF5tueYwEY43FI=";
    };
    meta.homepage = "https://github.com/tiagovla/tokyodark.nvim/";
    meta.hydraPlatforms = [ ];
  };
  tokyonight-nvim = buildVimPlugin {
    pname = "tokyonight.nvim";
    version = "4.14.1-unstable-2026-03-24";
+1 −0
Original line number Diff line number Diff line
@@ -1262,6 +1262,7 @@ https://github.com/edkolev/tmuxline.vim/,,
https://github.com/folke/todo-comments.nvim/,,
https://github.com/freitass/todo.txt-vim/,,
https://github.com/akinsho/toggleterm.nvim/,,
https://github.com/tiagovla/tokyodark.nvim/,HEAD,
https://github.com/folke/tokyonight.nvim/,,
https://github.com/markonm/traces.vim/,,
https://github.com/LeonHeidelbach/trailblazer.nvim/,HEAD,
Loading