Unverified commit 846c6bc2, authored by rorosen and committed by GitHub
Browse files

nixos/tests/rancher: merge auto deploy and configuration tests (#476577)

parents c2ed7da6 3b4472e7
Loading
Loading
Loading
Loading
+0 −40
Original line number Diff line number Diff line
# A test that imports k3s airgapped images and verifies that all expected images are present
{
  pkgs,
  lib,
  rancherDistro,
  rancherPackage,
  serviceName,
  disabledComponents,
  coreImages,
  vmResources,
  ...
}:
let
  # Distro-specific airgap image archives to import on top of coreImages.
  # Selecting via attribute lookup keeps the original behaviour of failing
  # loudly at evaluation time for an unknown distro.
  extraAirgapImages =
    {
      k3s = [ rancherPackage.airgap-images ];
      rke2 = [ ]; # RKE2 already includes its airgap-images in coreImages
    }
    .${rancherDistro};
in
{
  name = "${rancherPackage.name}-airgap-images";

  nodes.machine = _: {
    virtualisation = vmResources;

    services.${rancherDistro} = {
      enable = true;
      role = "server";
      package = rancherPackage;
      disable = disabledComponents;
      images = coreImages ++ extraAirgapImages;
    };
  };

  testScript = ''
    machine.wait_for_unit("${serviceName}")
    machine.wait_until_succeeds("journalctl -r --no-pager -u ${serviceName} | grep \"Imported images from /var/lib/rancher/${rancherDistro}/agent/images/\"")
  '';

  meta.maintainers = lib.teams.k3s.members ++ pkgs.rke2.meta.maintainers;
}
+0 −230
Original line number Diff line number Diff line
# Tests whether container images are imported and auto deploying Helm charts,
# including the bundled traefik or ingress-nginx, work
{
  pkgs,
  lib,
  rancherDistro, # distro attribute name, "k3s" or "rke2"; used to index distro-specific attrsets below
  rancherPackage, # the k3s/rke2 package under test
  serviceName, # systemd unit to wait for in testScript — presumably "k3s.service"/"rke2-server.service"; confirm against caller
  disabledComponents, # components the caller disables by default; the ingress entry is re-enabled below
  coreImages, # base list of container images made available to the node
  vmResources, # assigned verbatim to the node's `virtualisation` options
  ...
}:
let
  # Root filesystem for the test image: busybox gives a shell ("sh"),
  # hello provides the command run by the test jobs.
  # NOTE(review): the env name still says "pause-image-env" — looks like a
  # leftover label; kept byte-for-byte to preserve the derivation name.
  testImageEnv = pkgs.buildEnv {
    name = "${rancherDistro}-pause-image-env";
    paths = [
      pkgs.busybox
      pkgs.hello
    ];
  };
  # Minimal local OCI image used by every test chart in this file
  testImage = pkgs.dockerTools.buildImage {
    name = "test.local/test";
    tag = "local";
    copyToRoot = testImageEnv;
    # Slightly reduces the time needed to import image
    compressor = "zstd";
  };
  # pack the test helm chart as a .tgz archive
  package =
    pkgs.runCommand "${rancherDistro}-test-chart.tgz"
      {
        # helm CLI is needed at build time to run `helm package`
        nativeBuildInputs = [ pkgs.kubernetes-helm ];
        # Chart.yaml contents; emitted as JSON, which helm accepts since
        # JSON is a subset of YAML
        chart = builtins.toJSON {
          name = "${rancherDistro}-test-chart";
          version = "0.1.0";
        };
        # default values.yaml; image/runCommand are placeholders that each
        # test case overrides via chart values
        values = builtins.toJSON {
          restartPolicy = "Never";
          runCommand = "";
          image = {
            repository = "foo";
            tag = "1.0.0";
          };
        };
        # templates/job.json: a Job template whose image and command are
        # filled in from the chart values at install time
        job = builtins.toJSON {
          apiVersion = "batch/v1";
          kind = "Job";
          metadata = {
            name = "{{ .Release.Name }}";
            namespace = "{{ .Release.Namespace }}";
          };
          spec = {
            template = {
              spec = {
                containers = [
                  {
                    name = "test";
                    image = "{{ .Values.image.repository }}:{{ .Values.image.tag }}";
                    command = [ "sh" ];
                    args = [
                      "-c"
                      "{{ .Values.runCommand }}"
                    ];
                  }
                ];
                restartPolicy = "{{ .Values.restartPolicy }}";
              };
            };
          };
        };
        # write the three JSON documents above to files, exposed to the
        # builder as $chartPath/$valuesPath/$jobPath
        passAsFile = [
          "values"
          "chart"
          "job"
        ];
      }
      ''
        mkdir -p chart/templates
        cp "$chartPath" chart/Chart.yaml
        cp "$valuesPath" chart/values.yaml
        cp "$jobPath" chart/templates/job.json

        helm package chart
        mv ./*.tgz $out
      '';
  # Baseline Helm chart definition shared by every autoDeployCharts entry;
  # individual entries extend or override it with `//`.
  testChart = {
    package = package;
    values = {
      image = {
        repository = testImage.imageName;
        tag = testImage.imageTag;
      };
      runCommand = "hello";
    };
  };
in
{
  name = "${rancherPackage.name}-auto-deploy-helm";

  nodes.machine =
    { pkgs, ... }:
    {
      # kubectl drives the cluster from the test script; yq-go converts the
      # generated manifest files to JSON for inspection
      environment.systemPackages = with pkgs; [
        kubectl
        yq-go
      ];
      # point kubectl at the cluster's admin kubeconfig
      environment.sessionVariables.KUBECONFIG = "/etc/rancher/${rancherDistro}/${rancherDistro}.yaml";

      virtualisation = vmResources;

      services.${rancherDistro} = {
        enable = true;
        package = rancherPackage;
        # Re-enable the bundled ingress controller (traefik on k3s,
        # ingress-nginx on RKE2) so its rollout can be checked in testScript
        disable =
          {
            k3s = lib.remove "traefik" disabledComponents;
            rke2 = lib.remove "rke2-ingress-nginx" disabledComponents;
          }
          .${rancherDistro};
        images =
          coreImages
          # Provides the k3s Helm controller
          ++ lib.optional (rancherDistro == "k3s") rancherPackage.airgap-images
          ++ [
            testImage
          ];
        autoDeployCharts = {
          # regular test chart that should get installed
          hello = testChart;
          # disabled chart that should not get installed
          disabled = testChart // {
            enable = false;
          };
          # chart with values set via YAML file
          values-file = testChart // {
            # Remove unsafeDiscardStringContext workaround when Nix can convert a string to a path
            # https://github.com/NixOS/nix/issues/12407
            values =
              /.
              + builtins.unsafeDiscardStringContext (
                builtins.toFile "${rancherDistro}-test-chart-values.yaml" ''
                  runCommand: "echo 'Hello, file!'"
                  image:
                    repository: test.local/test
                    tag: local
                ''
              );
          };
          # advanced chart that should get installed in the "test" namespace with a custom
          # timeout and overridden values
          advanced = testChart // {
            # create the "test" namespace via extraDeploy for testing
            extraDeploy = [
              {
                apiVersion = "v1";
                kind = "Namespace";
                metadata.name = "test";
              }
            ];
            extraFieldDefinitions = {
              spec = {
                # overwrite chart values
                valuesContent = ''
                  runCommand: "echo 'advanced hello'"
                  image:
                    repository: ${testImage.imageName}
                    tag: ${testImage.imageTag}
                '';
                # overwrite the chart namespace
                targetNamespace = "test";
                # set a custom timeout
                timeout = "69s";
              };
            };
          };
        };
      };
    };

  testScript = # python
    let
      # file extension of the generated chart manifest files differs per distro
      manifestFormat =
        {
          k3s = "yaml";
          rke2 = "json";
        }
        .${rancherDistro};
    in
    ''
      import json

      machine.wait_for_unit("${serviceName}")
      # check existence/absence of chart manifest files
      machine.succeed("test -e /var/lib/rancher/${rancherDistro}/server/manifests/hello.${manifestFormat}")
      machine.succeed("test ! -e /var/lib/rancher/${rancherDistro}/server/manifests/disabled.${manifestFormat}")
      machine.succeed("test -e /var/lib/rancher/${rancherDistro}/server/manifests/values-file.${manifestFormat}")
      machine.succeed("test -e /var/lib/rancher/${rancherDistro}/server/manifests/advanced.${manifestFormat}")
      # check that the timeout is set correctly, select only the first item in advanced.yaml
      advancedManifest = json.loads(machine.succeed("yq -o json '.items[0]' /var/lib/rancher/${rancherDistro}/server/manifests/advanced.${manifestFormat}"))
      t.assertEqual(advancedManifest["spec"]["timeout"], "69s", "unexpected value for spec.timeout")
      # wait for test jobs to complete
      machine.wait_until_succeeds("kubectl wait --for=condition=complete job/hello", timeout=180)
      machine.wait_until_succeeds("kubectl wait --for=condition=complete job/values-file", timeout=180)
      machine.wait_until_succeeds("kubectl -n test wait --for=condition=complete job/advanced", timeout=180)
      # check output of test jobs
      hello_output = machine.succeed("kubectl logs -l batch.kubernetes.io/job-name=hello")
      values_file_output = machine.succeed("kubectl logs -l batch.kubernetes.io/job-name=values-file")
      advanced_output = machine.succeed("kubectl -n test logs -l batch.kubernetes.io/job-name=advanced")
      # strip the output to remove trailing whitespaces
      t.assertEqual(hello_output.rstrip(), "Hello, world!", "unexpected output of hello job")
      t.assertEqual(values_file_output.rstrip(), "Hello, file!", "unexpected output of values file job")
      t.assertEqual(advanced_output.rstrip(), "advanced hello", "unexpected output of advanced job")
      # wait for bundled ingress deployment
      ${
        {
          k3s = ''
            machine.wait_until_succeeds("kubectl -n kube-system rollout status deployment traefik", timeout=180)
          '';
          rke2 = ''
            machine.wait_until_succeeds("kubectl -n kube-system rollout status daemonset rke2-ingress-nginx-controller", timeout=180)
          '';
        }
        .${rancherDistro}
      }
    '';

  meta.maintainers = lib.teams.k3s.members ++ pkgs.rke2.meta.maintainers;
}
+202 −49
Original line number Diff line number Diff line
# Tests whether container images are imported and auto deploying manifests work
# Tests whether container images are imported and auto deploy (manifests and charts) work.
# Additionally, imports airgap images and verifies deployment of the bundled reverse
# proxy (traefik or ingress-nginx)
{
  pkgs,
  lib,
@@ -11,38 +13,94 @@
  ...
}:
let
  pauseImageEnv = pkgs.buildEnv {
    name = "${rancherDistro}-pause-image-env";
  testImageEnv = pkgs.buildEnv {
    name = "${rancherDistro}-test-image-env";
    paths = with pkgs; [
      tini
      (lib.hiPrio coreutils)
      busybox
      hello
    ];
  };
  pauseImage = pkgs.dockerTools.buildImage {
    name = "test.local/pause";
  testImage = pkgs.dockerTools.buildImage {
    name = "test.local/test";
    tag = "local";
    copyToRoot = pauseImageEnv;
    config.Entrypoint = [
      "/bin/tini"
      "--"
      "/bin/sleep"
      "inf"
    ];
  };
  helloImage = pkgs.dockerTools.buildImage {
    name = "test.local/hello";
    tag = "local";
    copyToRoot = pkgs.hello;
    config.Entrypoint = [ "${pkgs.hello}/bin/hello" ];
    compressor = "zstd";
    copyToRoot = testImageEnv;
  };

  manifestFormat =
    {
      k3s = "yaml";
      rke2 = "json";
    }
    .${rancherDistro};
  # pack the test helm chart as a .tgz archive
  testChartPackage =
    pkgs.runCommand "${rancherDistro}-test-chart.tgz"
      {
        nativeBuildInputs = [ pkgs.kubernetes-helm ];
        chart = builtins.toJSON {
          name = "${rancherDistro}-test-chart";
          version = "0.1.0";
        };
        values = builtins.toJSON {
          restartPolicy = "Never";
          runCommand = "";
          image = {
            repository = "foo";
            tag = "1.0.0";
          };
        };
        job = builtins.toJSON {
          apiVersion = "batch/v1";
          kind = "Job";
          metadata = {
            name = "{{ .Release.Name }}";
            namespace = "{{ .Release.Namespace }}";
          };
          spec = {
            template = {
              spec = {
                containers = [
                  {
                    name = "test";
                    image = "{{ .Values.image.repository }}:{{ .Values.image.tag }}";
                    command = [ "sh" ];
                    args = [
                      "-c"
                      "{{ .Values.runCommand }}"
                    ];
                  }
                ];
                restartPolicy = "{{ .Values.restartPolicy }}";
              };
            };
          };
        };
        passAsFile = [
          "values"
          "chart"
          "job"
        ];
      }
      ''
        mkdir -p chart/templates
        cp "$chartPath" chart/Chart.yaml
        cp "$valuesPath" chart/values.yaml
        cp "$jobPath" chart/templates/job.json

        helm package chart
        mv ./*.tgz $out
      '';
  # The Helm chart that is used in this test
  testChart = {
    package = testChartPackage;
    values = {
      runCommand = "hello";
      image = {
        repository = testImage.imageName;
        tag = testImage.imageTag;
      };
    };
  };
in
{
  name = "${rancherPackage.name}-auto-deploy";
@@ -53,6 +111,7 @@ in
      environment.systemPackages = with pkgs; [
        kubectl
        cri-tools
        yq-go
      ];
      environment.sessionVariables.KUBECONFIG = "/etc/rancher/${rancherDistro}/${rancherDistro}.yaml";

@@ -62,16 +121,16 @@ in
        enable = true;
        role = "server";
        package = rancherPackage;
        disable = disabledComponents;
        extraFlags = [
          "--pause-image test.local/pause:local"
        ];
        images = coreImages ++ [
          pauseImage
          helloImage
        ];
        disable =
          {
            k3s = lib.remove "traefik" disabledComponents;
            rke2 = lib.remove "rke2-ingress-nginx" disabledComponents;
          }
          .${rancherDistro};
        images =
          coreImages ++ lib.optional (rancherDistro == "k3s") rancherPackage.airgap-images ++ [ testImage ];
        manifests = {
          absent = {
          manifest-absent = {
            enable = false;
            content = {
              apiVersion = "v1";
@@ -80,7 +139,7 @@ in
            };
          };

          present = {
          manifest-present = {
            target = "foo-namespace.${manifestFormat}";
            content = {
              apiVersion = "v1";
@@ -89,16 +148,17 @@ in
            };
          };

          hello.content = {
          manifest-hello.content = {
            apiVersion = "batch/v1";
            kind = "Job";
            metadata.name = "hello";
            metadata.name = "manifest-hello";
            spec = {
              template.spec = {
                containers = [
                  {
                    name = "hello";
                    image = "test.local/hello:local";
                    image = "${testImage.imageName}:${testImage.imageTag}";
                    command = [ "hello" ];
                  }
                ];
                restartPolicy = "OnFailure";
@@ -106,28 +166,121 @@ in
            };
          };
        };
        autoDeployCharts = {
          # regular test chart that should get installed
          chart-hello = testChart;
          # disabled chart that should not get installed
          chart-disabled = testChart // {
            enable = false;
          };
          # chart with values set via YAML file
          chart-values-file = testChart // {
            # Remove unsafeDiscardStringContext workaround when Nix can convert a string to a path
            # https://github.com/NixOS/nix/issues/12407
            values =
              /.
              + builtins.unsafeDiscardStringContext (
                builtins.toFile "${rancherDistro}-test-chart-values.yaml" ''
                  runCommand: "echo 'Hello, file!'"
                  image:
                    repository: test.local/test
                    tag: local
                ''
              );
          };
          # advanced chart that should get installed in the "test" namespace with a custom
          # timeout and overridden values
          chart-advanced = testChart // {
            # create the "test" namespace via extraDeploy for testing
            extraDeploy = [
              {
                apiVersion = "v1";
                kind = "Namespace";
                metadata.name = "test";
              }
            ];
            extraFieldDefinitions = {
              spec = {
                # overwrite chart values
                valuesContent = ''
                  runCommand: "echo 'advanced hello'"
                  image:
                    repository: ${testImage.imageName}
                    tag: ${testImage.imageTag}
                '';
                # overwrite the chart namespace
                targetNamespace = "test";
                # set a custom timeout
                timeout = "69s";
              };
            };
          };
        };
      };
    };

  testScript = # python
    ''
      start_all()
      import json

      machine.wait_for_unit("${serviceName}")
      # check existence of the manifest files
      machine.fail("ls /var/lib/rancher/${rancherDistro}/server/manifests/absent.${manifestFormat}")
      machine.succeed("ls /var/lib/rancher/${rancherDistro}/server/manifests/foo-namespace.${manifestFormat}")
      machine.succeed("ls /var/lib/rancher/${rancherDistro}/server/manifests/hello.${manifestFormat}")

      # check if container images got imported
      with subtest("Generation of manifest files"):
        machine.succeed("test ! -e /var/lib/rancher/${rancherDistro}/server/manifests/manifest-absent.${manifestFormat}")
        machine.succeed("test -e /var/lib/rancher/${rancherDistro}/server/manifests/foo-namespace.${manifestFormat}")
        machine.succeed("test -e /var/lib/rancher/${rancherDistro}/server/manifests/manifest-hello.${manifestFormat}")

      with subtest("Generation of chart manifest files"):
        machine.succeed("test ! -e /var/lib/rancher/${rancherDistro}/server/manifests/chart-disabled.${manifestFormat}")
        machine.succeed("test -e /var/lib/rancher/${rancherDistro}/server/manifests/chart-hello.${manifestFormat}")
        machine.succeed("test -e /var/lib/rancher/${rancherDistro}/server/manifests/chart-values-file.${manifestFormat}")
        machine.succeed("test -e /var/lib/rancher/${rancherDistro}/server/manifests/chart-advanced.${manifestFormat}")

      with subtest("Timeout of advanced chart"):
        # select only the first item in advanced.yaml
        advancedManifest = json.loads(machine.succeed("yq -o json '.items[0]' /var/lib/rancher/${rancherDistro}/server/manifests/chart-advanced.${manifestFormat}"))
        t.assertEqual(advancedManifest["spec"]["timeout"], "69s", "unexpected value for spec.timeout")

      with subtest("Container image import"):
        # for some reason, RKE2 also uses /run/k3s
      machine.wait_until_succeeds("crictl -r /run/k3s/containerd/containerd.sock img | grep 'test\.local/pause'")
      machine.wait_until_succeeds("crictl -r /run/k3s/containerd/containerd.sock img | grep 'test\.local/hello'")
        machine.wait_until_succeeds("crictl -r /run/k3s/containerd/containerd.sock img | grep 'test\.local/test'")
        machine.wait_until_succeeds("crictl -r /run/k3s/containerd/containerd.sock img | grep '^docker.io/rancher/mirrored-'")

      # check if resources of manifests got created
      with subtest("Creation of manifest resource"):
        machine.wait_until_succeeds("kubectl get ns foo")
      machine.wait_until_succeeds("kubectl wait --for=condition=complete job/hello")
        machine.wait_until_succeeds("kubectl wait --for=condition=complete job/manifest-hello")
        machine.fail("kubectl get ns absent")

      with subtest("Completion of chart test jobs"):
        machine.wait_until_succeeds("kubectl wait --for=condition=complete job/chart-hello")
        machine.wait_until_succeeds("kubectl wait --for=condition=complete job/chart-values-file")
        machine.wait_until_succeeds("kubectl -n test wait --for=condition=complete job/chart-advanced")

      with subtest("Output of manifest test job"):
        hello_output = machine.succeed("kubectl logs -l batch.kubernetes.io/job-name=manifest-hello")
        t.assertEqual(hello_output.rstrip(), "Hello, world!", "unexpected output of manifest-hello job")

      with subtest("Output of chart test jobs"):
        hello_output = machine.succeed("kubectl logs -l batch.kubernetes.io/job-name=chart-hello")
        values_file_output = machine.succeed("kubectl logs -l batch.kubernetes.io/job-name=chart-values-file")
        advanced_output = machine.succeed("kubectl -n test logs -l batch.kubernetes.io/job-name=chart-advanced")
        # strip to remove trailing whitespaces
        t.assertEqual(hello_output.rstrip(), "Hello, world!", "unexpected output of chart hello job")
        t.assertEqual(values_file_output.rstrip(), "Hello, file!", "unexpected output of chart values file job")
        t.assertEqual(advanced_output.rstrip(), "advanced hello", "unexpected output of advanced chart job")

      with subtest("Deployment of bundled reverse proxy"):
        ${
          {
            k3s = ''
              machine.wait_until_succeeds("kubectl -n kube-system rollout status deployment traefik")
            '';
            rke2 = ''
              machine.wait_until_succeeds("kubectl -n kube-system rollout status daemonset rke2-ingress-nginx-controller")
            '';
          }
          .${rancherDistro}
        }
    '';

  meta.maintainers = lib.teams.k3s.members ++ pkgs.rke2.meta.maintainers;
+22 −8
Original line number Diff line number Diff line
# A test that sets extra kubelet configuration and enables graceful node shutdown
# Tests that containerd configuration, kubelet configuration, and graceful node shutdown are
# configured correctly
{
  pkgs,
  lib,
@@ -19,9 +20,9 @@ let
  containerLogMaxSize = "5Mi";
in
{
  name = "${rancherPackage.name}-kubelet-config";
  name = "${rancherPackage.name}-configuration";
  nodes.machine =
    { pkgs, ... }:
    { ... }:
    {
      environment.systemPackages = with pkgs; [
        kubectl
@@ -37,6 +38,12 @@ in
        disable = disabledComponents;
        images = coreImages;
        inherit nodeName;
        containerdConfigTemplate = ''
          # Base ${rancherDistro} config
          {{ template "base" . }}

          # MAGIC COMMENT
        '';
        gracefulNodeShutdown = {
          enable = true;
          inherit shutdownGracePeriod shutdownGracePeriodCriticalPods;
@@ -51,19 +58,26 @@ in
    ''
      import json

      start_all()
      machine.wait_for_unit("${serviceName}")
      # wait until the node is ready
      machine.wait_until_succeeds(r"""kubectl get node ${nodeName} -ojson | jq -e '.status.conditions[] | select(.type == "Ready") | .status == "True"'""")
      # test whether the kubelet registered an inhibitor lock

      with subtest("Inhibitor lock is registered"):
        machine.succeed("systemd-inhibit --list --no-legend | grep \"^kubelet.*shutdown\"")

      with subtest("Containerd config contains magic comment"):
        out=machine.succeed("cat /var/lib/rancher/${rancherDistro}/agent/etc/containerd/config.toml.tmpl")
        t.assertIn("MAGIC COMMENT", out, "the containerd config template does not contain the magic comment")
        # config file contains the magic comment
        out=machine.succeed("cat /var/lib/rancher/${rancherDistro}/agent/etc/containerd/config.toml")
        t.assertIn("MAGIC COMMENT", out, "the containerd config does not contain the magic comment")

      # run kubectl proxy in the background, close stdout through redirection to not wait for the command to finish
      machine.execute("kubectl proxy --address 127.0.0.1 --port=8001 >&2 &")
      machine.wait_until_succeeds("nc -z 127.0.0.1 8001")
      # get the kubeletconfig
      kubelet_config=json.loads(machine.succeed("curl http://127.0.0.1:8001/api/v1/nodes/${nodeName}/proxy/configz | jq '.kubeletconfig'"))

      with subtest("Kubelet config values are set correctly"):
        kubelet_config=json.loads(machine.succeed("curl http://127.0.0.1:8001/api/v1/nodes/${nodeName}/proxy/configz | jq '.kubeletconfig'"))
        t.assertEqual(kubelet_config["shutdownGracePeriod"], "${shutdownGracePeriod}")
        t.assertEqual(kubelet_config["shutdownGracePeriodCriticalPods"], "${shutdownGracePeriodCriticalPods}")
        t.assertEqual(kubelet_config["podsPerCore"], ${toString podsPerCore})
+0 −59

File deleted.

Preview size limit exceeded, changes collapsed.

Loading