Commit cd96421e authored by Robert Rose's avatar Robert Rose
Browse files

nixos/k3s: refactor multi-node test

The refactoring parallelizes preliminary tasks like the node start and
import of the pause image to speed up execution of the test. It also
unifies the usage of extraFlags across all nodes.
parent 7efd5bea
Loading
Loading
Loading
Loading
+52 −68
Original line number Diff line number Diff line
@@ -16,10 +16,10 @@ import ../make-test-python.nix (
        socat
      ];
    };
    pauseImage = pkgs.dockerTools.streamLayeredImage {
    pauseImage = pkgs.dockerTools.buildImage {
      name = "test.local/pause";
      tag = "local";
      contents = imageEnv;
      copyToRoot = imageEnv;
      config.Entrypoint = [
        "/bin/tini"
        "--"
@@ -75,6 +75,7 @@ import ../make-test-python.nix (
            enable = true;
            role = "server";
            package = k3s;
            images = [ pauseImage ];
            clusterInit = true;
            extraFlags = [
              "--disable coredns"
@@ -117,23 +118,17 @@ import ../make-test-python.nix (
            inherit tokenFile;
            enable = true;
            package = k3s;
            images = [ pauseImage ];
            serverAddr = "https://192.168.1.1:6443";
            clusterInit = false;
            extraFlags = builtins.toString [
              "--disable"
              "coredns"
              "--disable"
              "local-storage"
              "--disable"
              "metrics-server"
              "--disable"
              "servicelb"
              "--disable"
              "traefik"
              "--node-ip"
              "192.168.1.3"
              "--pause-image"
              "test.local/pause:local"
            extraFlags = [
              "--disable coredns"
              "--disable local-storage"
              "--disable metrics-server"
              "--disable servicelb"
              "--disable traefik"
              "--node-ip 192.168.1.3"
              "--pause-image test.local/pause:local"
            ];
          };
          networking.firewall.allowedTCPPorts = [
@@ -163,12 +158,11 @@ import ../make-test-python.nix (
            enable = true;
            role = "agent";
            package = k3s;
            images = [ pauseImage ];
            serverAddr = "https://192.168.1.3:6443";
            extraFlags = lib.concatStringsSep " " [
              "--pause-image"
              "test.local/pause:local"
              "--node-ip"
              "192.168.1.2"
            extraFlags = [
              "--pause-image test.local/pause:local"
              "--node-ip 192.168.1.2"
            ];
          };
          networking.firewall.allowedTCPPorts = [ 6443 ];
@@ -185,22 +179,19 @@ import ../make-test-python.nix (
        };
    };

    testScript = ''
    testScript = # python
      ''
        start_all()

        machines = [server, server2, agent]
        for m in machines:
          m.start()
            m.wait_for_unit("k3s")

      is_aarch64 = "${toString pkgs.stdenv.hostPlatform.isAarch64}" == "1"

        # wait for the agent to show up
        server.wait_until_succeeds("k3s kubectl get node agent")

        for m in machines:
            m.succeed("k3s check-config")
          m.succeed(
              "${pauseImage} | k3s ctr image import -"
          )

        server.succeed("k3s kubectl cluster-info")
        # Also wait for our service account to show up; it takes a sec
@@ -217,19 +208,12 @@ import ../make-test-python.nix (
        # Verify each server can ping each pod ip
        for pod_ip in pod_ips:
            server.succeed(f"ping -c 1 {pod_ip}")
            server2.succeed(f"ping -c 1 {pod_ip}")
            agent.succeed(f"ping -c 1 {pod_ip}")

            # Verify the pods can talk to each other
      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[0]} -- socat TCP:{pod_ips[1]}:8000 -")
            for pod in pods:
                resp = server.succeed(f"k3s kubectl exec {pod} -- socat TCP:{pod_ip}:8000 -")
                assert resp.strip() == "server"
      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[1]} -- socat TCP:{pod_ips[0]}:8000 -")
      assert resp.strip() == "server"

      # Cleanup
      server.succeed("k3s kubectl delete -f ${networkTestDaemonset}")

      for m in machines:
          m.shutdown()
      '';

    meta.maintainers = lib.teams.k3s.members;