Unverified Commit ac4abf7f authored by azey's avatar azey
Browse files

nixos/rke2: merge with nixos/k3s

K3s module was moved from `nixos/.../k3s` to `nixos/.../rancher`.

`rke2/default.nix` was moved to `rancher/rke2.nix`, and some options from RKE2 were migrated into the common `default.nix` for backwards compatibility.

Manifest generation was also changed: instead of multi-doc YAML files, the module now generates `kind: List` manifests.
parent 5c23a1c3
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -261,7 +261,8 @@
  - any:
      - changed-files:
          - any-glob-to-any-file:
              - nixos/modules/services/cluster/k3s/**/*
              - nixos/modules/services/cluster/rancher/default.nix
              - nixos/modules/services/cluster/rancher/k3s.nix
              - nixos/tests/k3s/**/*
              - pkgs/applications/networking/cluster/k3s/**/*

+3 −0
Original line number Diff line number Diff line
@@ -450,6 +450,9 @@ and [release notes for v18](https://goteleport.com/docs/changelog/#1800-070325).

- `services.matter-server` now hosts a debug dashboard on the configured port. Open the port on the firewall with `services.matter-server.openFirewall`.

- `services.k3s` now shares most of its code with `services.rke2`. The merge resulted in both modules providing more options, with `services.rke2` receiving the most improvements.
  Existing configurations for either module should not be affected.

- The new option [networking.ipips](#opt-networking.ipips) has been added to create IP within IP kind of tunnels (including 4in6, ip6ip6 and ipip).
  With the existing [networking.sits](#opt-networking.sits) option (6in4), it is now possible to create all combinations of IPv4 and IPv6 encapsulation.

+1 −2
Original line number Diff line number Diff line
@@ -475,7 +475,6 @@
  ./services/cluster/corosync/default.nix
  ./services/cluster/druid/default.nix
  ./services/cluster/hadoop/default.nix
  ./services/cluster/k3s/default.nix
  ./services/cluster/kubernetes/addon-manager.nix
  ./services/cluster/kubernetes/addons/dns.nix
  ./services/cluster/kubernetes/apiserver.nix
@@ -488,7 +487,7 @@
  ./services/cluster/kubernetes/scheduler.nix
  ./services/cluster/pacemaker/default.nix
  ./services/cluster/patroni/default.nix
  ./services/cluster/rke2/default.nix
  ./services/cluster/rancher/default.nix
  ./services/cluster/spark/default.nix
  ./services/cluster/temporal/default.nix
  ./services/computing/boinc/client.nix
+205 −56
Original line number Diff line number Diff line
@@ -7,26 +7,60 @@
let
  mkRancherModule =
    {
      # name used in paths/names, e.g. k3s
      name ? null,
      # name used in paths/bin names/etc, e.g. k3s
      name,
      # systemd service name
      serviceName ? name,
      # extra flags to pass to the binary before user-defined extraFlags
      extraBinFlags ? [ ],
      # generate manifests as JSON rather than YAML, see rke2.nix
      jsonManifests ? false,

      # which port on the local node hosts content placed in ${staticContentChartDir} on /static/
      # if null, it's assumed the content can be accessed via https://%{KUBERNETES_API}%/static/
      staticContentPort ? null,
    }:
    let
      cfg = config.services.${name};

      # Paths defined here are passed to the downstream modules as `paths`
      manifestDir = "/var/lib/rancher/${name}/server/manifests";
      imageDir = "/var/lib/rancher/${name}/agent/images";
      containerdConfigTemplateFile = "/var/lib/rancher/${name}/agent/etc/containerd/config.toml.tmpl";
      staticContentChartDir = "/var/lib/rancher/${name}/server/static/charts";

      yamlFormat = pkgs.formats.yaml { };
      yamlDocSeparator = builtins.toFile "yaml-doc-separator" "\n---\n";
      # Manifests need a valid YAML suffix to be respected
      manifestFormat = if jsonManifests then pkgs.formats.json { } else pkgs.formats.yaml { };
      # Manifests need a valid suffix to be respected
      mkManifestTarget =
        name: if (lib.hasSuffix ".yaml" name || lib.hasSuffix ".yml" name) then name else name + ".yaml";
        name:
        if (lib.hasSuffix ".yaml" name || lib.hasSuffix ".yml" name || lib.hasSuffix ".json" name) then
          name
        else if jsonManifests then
          name + ".json"
        else
          name + ".yaml";
      # Returns a path to the final manifest file
      mkManifestSource =
        name: manifests:
        manifestFormat.generate name (
          if builtins.isList manifests then
            {
              apiVersion = "v1";
              kind = "List";
              items = manifests;
            }
          else
            manifests
        );

      # Produces a list containing all duplicate manifest names
      duplicateManifests = lib.intersectLists (builtins.attrNames cfg.autoDeployCharts) (
        builtins.attrNames cfg.manifests
      );
      # Produces a list containing all duplicate chart names
      duplicateCharts = lib.intersectLists (builtins.attrNames cfg.autoDeployCharts) (
        builtins.attrNames cfg.charts
      );

      # Converts YAML -> JSON -> Nix
      fromYaml =
@@ -93,7 +127,7 @@ let
          x.outPath
        # x is an attribute set that needs to be converted to a YAML file
        else if builtins.isAttrs x then
          (yamlFormat.generate "extra-deploy-chart-manifest" x)
          (manifestFormat.generate "extra-deploy-chart-manifest" x)
        # assume x is a path to a YAML file
        else
          x;
@@ -118,28 +152,24 @@ let
          spec = {
            inherit valuesContent;
            inherit (value) targetNamespace createNamespace;
            chart = "https://%{KUBERNETES_API}%/static/charts/${name}.tgz";
            chart =
              if staticContentPort == null then
                "https://%{KUBERNETES_API}%/static/charts/${name}.tgz"
              else
                "https://localhost:${toString staticContentPort}/static/charts/${name}.tgz";
            bootstrap = staticContentPort != null; # needed for host network access
          };
        } value.extraFieldDefinitions;

      # Generate a HelmChart custom resource together with extraDeploy manifests. This
      # generates possibly a multi document YAML file that the auto deploy mechanism
      # deploys.
      # Generate a HelmChart custom resource together with extraDeploy manifests.
      mkAutoDeployChartManifest = name: value: {
        # target is the final name of the link created for the manifest file
        target = mkManifestTarget name;
        inherit (value) enable package;
        # source is a store path containing the complete manifest file
        source = pkgs.concatText "auto-deploy-chart-${name}.yaml" (
          [
            (yamlFormat.generate "helm-chart-manifest-${name}.yaml" (mkHelmChartCR name value))
          ]
          # alternate the YAML doc separator (---) and extraDeploy manifests to create
          # multi document YAMLs
          ++ (lib.concatMap (x: [
            yamlDocSeparator
            (mkExtraDeployManifest x)
          ]) value.extraDeploy)
        source = mkManifestSource "auto-deploy-chart-${name}" (
          lib.singleton (mkHelmChartCR name value)
          ++ builtins.map (x: fromYaml (mkExtraDeployManifest x)) value.extraDeploy
        );
      };

@@ -239,7 +269,7 @@ let
                Override default chart values via Nix expressions. This is equivalent to setting
                values in a `values.yaml` file.

                WARNING: The values (including secrets!) specified here are exposed unencrypted
                **WARNING**: The values (including secrets!) specified here are exposed unencrypted
                in the world-readable nix store.
              '';
            };
@@ -276,7 +306,7 @@ let
            };

            extraFieldDefinitions = lib.mkOption {
              inherit (yamlFormat) type;
              inherit (manifestFormat) type;
              default = { };
              example = {
                spec = {
@@ -289,7 +319,7 @@ let
              description = ''
                Extra HelmChart field definitions that are merged with the rest of the HelmChart
                custom resource. This can be used to set advanced fields or to overwrite
                generated fields. See <https://docs.k3s.io/helm#helmchart-field-definitions>
                generated fields. See <https://docs.${name}.io/helm#helmchart-field-definitions>
                for possible fields.
              '';
            };
@@ -355,18 +385,7 @@ let
            source = lib.mkIf (config.content != null) (
              let
                name' = "${name}-manifest-" + builtins.baseNameOf name;
                docName = "${name}-manifest-doc-" + builtins.baseNameOf name;
                mkSource =
                  value:
                  if builtins.isList value then
                    pkgs.concatText name' (
                      lib.concatMap (x: [
                        yamlDocSeparator
                        (yamlFormat.generate docName x)
                      ]) value
                    )
                  else
                    yamlFormat.generate name' value;
                mkSource = mkManifestSource name';
              in
              lib.mkDerivedConfig options.content mkSource
            );
@@ -375,7 +394,14 @@ let
      );
    in
    {
      paths = { inherit manifestDir imageDir containerdConfigTemplateFile; };
      paths = {
        inherit
          manifestDir
          imageDir
          containerdConfigTemplateFile
          staticContentChartDir
          ;
      };

      # interface

@@ -405,7 +431,7 @@ let
          description = ''
            The ${name} token to use when connecting to a server.

            WARNING: This option will expose your token unencrypted in the world-readable nix store.
            **WARNING**: This option will expose your token unencrypted in the world-readable nix store.
            If this is undesired use the tokenFile option instead.
          '';
          default = "";
@@ -413,7 +439,28 @@ let

        tokenFile = lib.mkOption {
          type = lib.types.nullOr lib.types.path;
          description = "File path containing ${name} token to use when connecting to the server.";
          description = "File path containing the ${name} token to use when connecting to a server.";
          default = null;
        };

        agentToken = lib.mkOption {
          type = lib.types.str;
          description = ''
            The ${name} token agents can use to connect to the server.
            This option only makes sense on server nodes (`role = server`).

            **WARNING**: This option will expose your token unencrypted in the world-readable nix store.
            If this is undesired use the tokenFile option instead.
          '';
          default = "";
        };

        agentTokenFile = lib.mkOption {
          type = lib.types.nullOr lib.types.path;
          description = ''
            File path containing the ${name} token agents can use to connect to the server.
            This option only makes sense on server nodes (`role = server`).
          '';
          default = null;
        };

@@ -441,6 +488,42 @@ let
          description = "File path containing the ${name} YAML config. This is useful when the config is generated (for example on boot).";
        };

        disable = lib.mkOption {
          type = lib.types.listOf lib.types.str;
          description = "Disable default components via the `--disable` flag.";
          default = [ ];
        };

        nodeName = lib.mkOption {
          type = lib.types.nullOr lib.types.str;
          description = "Node name.";
          default = null;
        };

        nodeLabel = lib.mkOption {
          type = lib.types.listOf lib.types.str;
          description = "Registering and starting kubelet with set of labels.";
          default = [ ];
        };

        nodeTaint = lib.mkOption {
          type = lib.types.listOf lib.types.str;
          description = "Registering kubelet with set of taints.";
          default = [ ];
        };

        nodeIP = lib.mkOption {
          type = lib.types.nullOr lib.types.str;
          description = "IPv4/IPv6 addresses to advertise for node.";
          default = null;
        };

        selinux = lib.mkOption {
          type = lib.types.bool;
          description = "Enable SELinux in containerd.";
          default = false;
        };

        manifests = lib.mkOption {
          type = lib.types.attrsOf manifestModule;
          default = { };
@@ -528,6 +611,11 @@ let
            This option only makes sense on server nodes (`role = server`).
            Read the [auto-deploying manifests docs](https://docs.k3s.io/installation/packaged-components#auto-deploying-manifests-addons)
            for further information.

            **WARNING**: If you have multiple server nodes, and set this option on more than one server,
            it is your responsibility to ensure that files stay in sync across those nodes. AddOn content is
            not synced between nodes, and ${name} cannot guarantee correct behavior if different servers attempt
            to deploy conflicting manifests.
          '';
        };

@@ -676,10 +764,37 @@ let
          '';
          description = ''
            Auto deploying Helm charts that are installed by the ${name} Helm controller. Avoid using
            attribute names that are also used in the [](#opt-services.${name}.manifests) option.
            Manifests with the same name will override auto deploying charts with the same name.
            attribute names that are also used in the [](#opt-services.${name}.manifests) and
            [](#opt-services.${name}.charts) options. Manifests with the same name will override
            auto deploying charts with the same name.
            This option only makes sense on server nodes (`role = server`). See the
            [${name} Helm documentation](https://docs.${name}.io/helm) for further information.

            **WARNING**: If you have multiple server nodes, and set this option on more than one server,
            it is your responsibility to ensure that files stay in sync across those nodes. AddOn content is
            not synced between nodes, and ${name} cannot guarantee correct behavior if different servers attempt
            to deploy conflicting manifests.
          '';
        };

        charts = lib.mkOption {
          type = with lib.types; attrsOf (either path package);
          default = { };
          example = lib.literalExpression ''
            nginx = ../charts/my-nginx-chart.tgz;
            redis = ../charts/my-redis-chart.tgz;
          '';
          description = ''
            Packaged Helm charts that are linked to {file}`${staticContentChartDir}` before ${name} starts.
            The attribute name will be used as the link target (relative to {file}`${staticContentChartDir}`).
            The specified charts will only be placed on the file system and made available via ${
              if staticContentPort == null then
                "the Kubernetes APIServer from within the cluster"
              else
                "port ${toString staticContentPort} on server nodes"
            }. See the [](#opt-services.${name}.autoDeployCharts) option and the
            [${name} Helm controller docs](https://docs.${name}.io/helm#using-the-helm-controller)
            to deploy Helm charts. This option only makes sense on server nodes (`role = server`).
          '';
        };
      };
@@ -697,13 +812,22 @@ let
          ++ (lib.optional (duplicateManifests != [ ])
            "${name}: The following auto deploying charts are overridden by manifests of the same name: ${toString duplicateManifests}."
          )
          ++ (lib.optional (duplicateCharts != [ ])
            "${name}: The following auto deploying charts are overridden by charts of the same name: ${toString duplicateCharts}."
          )
          ++ (lib.optional (cfg.role != "server" && cfg.charts != { })
            "${name}: Helm charts are only made available to the cluster on server nodes (role == server), they will be ignored by this node."
          )
          ++ (lib.optional (
            cfg.role == "agent" && cfg.configPath == null && cfg.serverAddr == ""
          ) "${name}: serverAddr or configPath (with 'server' key) should be set if role is 'agent'")
          ++ (lib.optional
            (cfg.role == "agent" && cfg.configPath == null && cfg.tokenFile == null && cfg.token == "")
            "${name}: Token or tokenFile or configPath (with 'token' or 'token-file' keys) should be set if role is 'agent'"
          );
            "${name}: token, tokenFile or configPath (with 'token' or 'token-file' keys) should be set if role is 'agent'"
          )
          ++ (lib.optional (
            cfg.role == "agent" && (cfg.agentTokenFile != null || cfg.agentToken != "")
          ) "${name}: agentToken and agentTokenFile should not be set if role is 'agent'");

        environment.systemPackages = [ config.services.${name}.package ];

@@ -726,6 +850,21 @@ let
                "L+".argument = "${image}";
              };
            };
            # Merge charts with charts contained in enabled auto deploying charts
            helmCharts =
              (lib.concatMapAttrs (n: v: { ${n} = v.package; }) (
                lib.filterAttrs (_: v: v.enable) cfg.autoDeployCharts
              ))
              // cfg.charts;
            # Ensure that all chart targets have a .tgz suffix
            mkChartTarget = name: if (lib.hasSuffix ".tgz" name) then name else name + ".tgz";
            # Make a systemd-tmpfiles rule for a chart
            mkChartRule = target: source: {
              name = "${staticContentChartDir}/${mkChartTarget target}";
              value = {
                "L+".argument = "${source}";
              };
            };
          in
          (lib.mapAttrs' (_: v: mkManifestRule v) enabledManifests)
          // (builtins.listToAttrs (map mkImageRule cfg.images))
@@ -733,16 +872,17 @@ let
            ${containerdConfigTemplateFile} = {
              "L+".argument = "${pkgs.writeText "config.toml.tmpl" cfg.containerdConfigTemplate}";
            };
          });
          })
          // (lib.mapAttrs' mkChartRule helmCharts);

        systemd.services.${name} =
        systemd.services.${serviceName} =
          let
            kubeletParams =
              (lib.optionalAttrs (cfg.gracefulNodeShutdown.enable) {
                inherit (cfg.gracefulNodeShutdown) shutdownGracePeriod shutdownGracePeriodCriticalPods;
              })
              // cfg.extraKubeletConfig;
            kubeletConfig = (pkgs.formats.yaml { }).generate "${name}-kubelet-config" (
            kubeletConfig = manifestFormat.generate "${name}-kubelet-config" (
              {
                apiVersion = "kubelet.config.k8s.io/v1beta1";
                kind = "KubeletConfiguration";
@@ -750,7 +890,7 @@ let
              // kubeletParams
            );

            kubeProxyConfig = (pkgs.formats.yaml { }).generate "${name}-kubeProxy-config" (
            kubeProxyConfig = manifestFormat.generate "${name}-kubeProxy-config" (
              {
                apiVersion = "kubeproxy.config.k8s.io/v1alpha1";
                kind = "KubeProxyConfiguration";
@@ -781,13 +921,22 @@ let
              LimitNPROC = "infinity";
              LimitCORE = "infinity";
              TasksMax = "infinity";
              TimeoutStartSec = 0;
              EnvironmentFile = cfg.environmentFile;
              ExecStart = lib.concatStringsSep " \\\n " (
                [ "${cfg.package}/bin/${name} ${cfg.role}" ]
                ++ (lib.optional (cfg.serverAddr != "") "--server ${cfg.serverAddr}")
                ++ (lib.optional (cfg.token != "") "--token ${cfg.token}")
                ++ (lib.optional (cfg.tokenFile != null) "--token-file ${cfg.tokenFile}")
                ++ (lib.optional (cfg.agentToken != "") "--agent-token ${cfg.agentToken}")
                ++ (lib.optional (cfg.agentTokenFile != null) "--agent-token-file ${cfg.agentTokenFile}")
                ++ (lib.optional (cfg.configPath != null) "--config ${cfg.configPath}")
                ++ (map (d: "--disable=${d}") cfg.disable)
                ++ (lib.optional (cfg.nodeName != null) "--node-name=${cfg.nodeName}")
                ++ (lib.optionals (cfg.nodeLabel != [ ]) (map (l: "--node-label=${l}") cfg.nodeLabel))
                ++ (lib.optionals (cfg.nodeTaint != [ ]) (map (t: "--node-taint=${t}") cfg.nodeTaint))
                ++ (lib.optional (cfg.nodeIP != null) "--node-ip=${cfg.nodeIP}")
                ++ (lib.optional cfg.selinux "--selinux")
                ++ (lib.optional (kubeletParams != { }) "--kubelet-arg=config=${kubeletConfig}")
                ++ (lib.optional (cfg.extraKubeProxyConfig != { }) "--kube-proxy-arg=config=${kubeProxyConfig}")
                ++ extraBinFlags
@@ -802,16 +951,16 @@ in
  imports =
    # pass mkRancherModule explicitly instead of via
    # _modules.args to prevent infinite recursion
    builtins.map (
      f:
      import f {
    let
      args = {
        inherit config lib;
        inherit mkRancherModule;
      }
    ) [ ./k3s.nix ];
      };
    in
    [
      (import ./k3s.nix args)
      (import ./rke2.nix args)
    ];

  meta.maintainers =
    with lib.maintainers;
    [ azey7f ] # modules only
    ++ lib.teams.k3s.members;
  meta.maintainers = pkgs.rke2.meta.maintainers ++ lib.teams.k3s.members;
}
+10 −64
Original line number Diff line number Diff line
@@ -22,12 +22,6 @@ let
      ]
      ++ config
    ) instruction;

  chartDir = "/var/lib/rancher/k3s/server/static/charts";
  # Produces a list containing all duplicate chart names
  duplicateCharts = lib.intersectLists (builtins.attrNames cfg.autoDeployCharts) (
    builtins.attrNames cfg.charts
  );
in
{
  imports = [ (removeOption [ "docker" ] "k3s docker option is no longer supported.") ];
@@ -61,6 +55,10 @@ in
      to know how to configure the firewall.
    '';

    disable.description = ''
      Disable default components, see the [K3s documentation](https://docs.k3s.io/installation/packaged-components#using-the---disable-flag).
    '';

    images = {
      example = lib.literalExpression ''
        [
@@ -76,23 +74,13 @@ in
      '';
      description = ''
        List of derivations that provide container images.
        All images are linked to {file}`${baseModule.imageDir}` before k3s starts and are consequently imported
        All images are linked to {file}`${baseModule.paths.imageDir}` before k3s starts and are consequently imported
        by the k3s agent. Consider importing the k3s airgap images archive of the k3s package in
        use, if you want to pre-provision this node with all k3s container images. This option
        only makes sense on nodes with an enabled agent.
      '';
    };

    autoDeployCharts.description = ''
      Auto deploying Helm charts that are installed by the k3s Helm controller. Avoid using
      attribute names that are also used in the [](#opt-services.k3s.manifests) and
      [](#opt-services.k3s.charts) options. Manifests with the same name will override
      auto deploying charts with the same name. Similarly, charts with the same name will
      overwrite the Helm chart contained in auto deploying charts. This option only makes
      sense on server nodes (`role = server`). See the
      [k3s Helm documentation](https://docs.k3s.io/helm) for further information.
    '';

    # k3s-specific options

    clusterInit = lib.mkOption {
@@ -122,38 +110,16 @@ in
      default = false;
      description = "Only run the server. This option only makes sense for a server.";
    };

    charts = lib.mkOption {
      type = with lib.types; attrsOf (either path package);
      default = { };
      example = lib.literalExpression ''
        nginx = ../charts/my-nginx-chart.tgz;
        redis = ../charts/my-redis-chart.tgz;
      '';
      description = ''
        Packaged Helm charts that are linked to {file}`${chartDir}` before k3s starts.
        The attribute name will be used as the link target (relative to {file}`${chartDir}`).
        The specified charts will only be placed on the file system and made available to the
        Kubernetes APIServer from within the cluster. See the [](#opt-services.k3s.autoDeployCharts)
        option and the [k3s Helm controller docs](https://docs.k3s.io/helm#using-the-helm-controller)
        to deploy Helm charts. This option only makes sense on server nodes (`role = server`).
      '';
    };
  };

  # implementation

  config = lib.mkIf cfg.enable (
    lib.recursiveUpdate baseModule.config {
      warnings =
        (lib.optional (cfg.role != "server" && cfg.charts != { })
          "k3s: Helm charts are only made available to the cluster on server nodes (role == server), they will be ignored by this node."
        )
        ++ (lib.optional (duplicateCharts != [ ])
          "k3s: The following auto deploying charts are overridden by charts of the same name: ${toString duplicateCharts}."
        )
        ++ (lib.optional (cfg.disableAgent && cfg.images != [ ])
          "k3s: Images are only imported on nodes with an enabled agent, they will be ignored by this node."
      warnings = (
        lib.optional (
          cfg.disableAgent && cfg.images != [ ]
        ) "k3s: Images are only imported on nodes with an enabled agent, they will be ignored by this node."
      );

      assertions = [
@@ -166,26 +132,6 @@ in
          message = "k3s: clusterInit must be false if role is 'agent'";
        }
      ];

      systemd.tmpfiles.settings."10-k3s" =
        let
          # Merge charts with charts contained in enabled auto deploying charts
          helmCharts =
            (lib.concatMapAttrs (n: v: { ${n} = v.package; }) (
              lib.filterAttrs (_: v: v.enable) cfg.autoDeployCharts
            ))
            // cfg.charts;
          # Ensure that all chart targets have a .tgz suffix
          mkChartTarget = name: if (lib.hasSuffix ".tgz" name) then name else name + ".tgz";
          # Make a systemd-tmpfiles rule for a chart
          mkChartRule = target: source: {
            name = "${chartDir}/${mkChartTarget target}";
            value = {
              "L+".argument = "${source}";
            };
          };
        in
        lib.mapAttrs' (n: v: mkChartRule n v) helmCharts;
    }
  );
}
Loading