Unverified commit 564e99c1, authored by superherointj and committed via GitHub.
Browse files

Merge pull request #182445 from euank/k3s-multi-node-test

nixos/tests/k3s: add multi-node test, test basic flannel networking
parents b003ff08 b6da1d81
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -254,7 +254,7 @@ in {
  jibri = handleTest ./jibri.nix {};
  jirafeau = handleTest ./jirafeau.nix {};
  jitsi-meet = handleTest ./jitsi-meet.nix {};
  k3s-single-node = handleTest ./k3s-single-node.nix {};
  k3s = handleTest ./k3s {};
  kafka = handleTest ./kafka.nix {};
  kanidm = handleTest ./kanidm.nix {};
  kbd-setfont-decompress = handleTest ./kbd-setfont-decompress.nix {};
+9 −0
Original line number Diff line number Diff line
{ system ? builtins.currentSystem
, pkgs ? import ../../.. { inherit system; }
}:
let
  # Instantiate a test file with the shared system/pkgs arguments.
  callTest = path: import path { inherit system pkgs; };
in
{
  # Single-node cluster: verify a basic pod can be scheduled and run.
  single-node = callTest ./single-node.nix;
  # Multi-node cluster: verify pod networking (flannel) works across nodes.
  multi-node = callTest ./multi-node.nix;
}
+137 −0
Original line number Diff line number Diff line
import ../make-test-python.nix ({ pkgs, ... }:
  let
    # Minimal userland baked into the pause image; socat provides the
    # TCP echo "server" used for the cross-node networking check.
    imageEnv = pkgs.buildEnv {
      name = "k3s-pause-image-env";
      paths = with pkgs; [ tini bashInteractive coreutils socat ];
    };
    # Locally-built pause/test image so the VMs never need network access;
    # streamLayeredImage produces a script that emits the image on stdout,
    # which the test pipes into `k3s ctr image import -`.
    pauseImage = pkgs.dockerTools.streamLayeredImage {
      name = "test.local/pause";
      tag = "local";
      contents = imageEnv;
      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
    };
    # A daemonset that responds 'server' on port 8000
    networkTestDaemonset = pkgs.writeText "test.yml" ''
      apiVersion: apps/v1
      kind: DaemonSet
      metadata:
        name: test
        labels:
          name: test
      spec:
        selector:
          matchLabels:
            name: test
        template:
          metadata:
            labels:
              name: test
          spec:
            containers:
            - name: test
              image: test.local/pause:local
              imagePullPolicy: Never
              resources:
                limits:
                  memory: 20Mi
              command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo server"]
    '';
    # Shared cluster join token ($ here is literal — Nix only interpolates on ''${'').
    tokenFile = pkgs.writeText "token" "p@s$w0rd";
  in
  {
    name = "k3s-multi-node";

    nodes = {
      server = { pkgs, ... }: {
        environment.systemPackages = with pkgs; [ gzip jq ];
        # k3s uses enough resources the default vm fails.
        virtualisation.memorySize = 1536;
        virtualisation.diskSize = 4096;

        services.k3s = {
          inherit tokenFile;
          enable = true;
          role = "server";
          package = pkgs.k3s;
          # Disable bundled addons the test does not need, use the local
          # pause image, and pin the node IP to the static eth1 address.
          extraFlags = "--no-deploy coredns,servicelb,traefik,local-storage,metrics-server --pause-image test.local/pause:local --node-ip 192.168.1.1";
        };
        # 6443: kubernetes API server; 8472/udp: flannel VXLAN overlay.
        networking.firewall.allowedTCPPorts = [ 6443 ];
        networking.firewall.allowedUDPPorts = [ 8472 ];
        networking.firewall.trustedInterfaces = [ "flannel.1" ];
        # Static addressing so the agent can find the server deterministically.
        networking.useDHCP = false;
        networking.defaultGateway = "192.168.1.1";
        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
          { address = "192.168.1.1"; prefixLength = 24; }
        ];
      };

      agent = { pkgs, ... }: {
        virtualisation.memorySize = 1024;
        virtualisation.diskSize = 2048;
        services.k3s = {
          inherit tokenFile;
          enable = true;
          role = "agent";
          # Pin the same package as the server node so both run identical
          # k3s versions (pkgs.k3s is the module default; kept explicit
          # for consistency with the server config above).
          package = pkgs.k3s;
          serverAddr = "https://192.168.1.1:6443";
          extraFlags = "--pause-image test.local/pause:local --node-ip 192.168.1.2";
        };
        # NOTE(review): TCP 6443 is only served by the server node; opening
        # it here looks unnecessary but is harmless — confirm before removing.
        networking.firewall.allowedTCPPorts = [ 6443 ];
        networking.firewall.allowedUDPPorts = [ 8472 ];
        networking.firewall.trustedInterfaces = [ "flannel.1" ];
        networking.useDHCP = false;
        networking.defaultGateway = "192.168.1.2";
        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
          { address = "192.168.1.2"; prefixLength = 24; }
        ];
      };
    };

    meta = with pkgs.lib.maintainers; {
      maintainers = [ euank ];
    };

    testScript = ''
      start_all()
      machines = [server, agent]
      for m in machines:
          m.wait_for_unit("k3s")

      # wait for the agent to show up
      server.wait_until_succeeds("k3s kubectl get node agent")

      for m in machines:
          m.succeed("k3s check-config")
          m.succeed(
              "${pauseImage} | k3s ctr image import -"
          )

      server.succeed("k3s kubectl cluster-info")
      # Also wait for our service account to show up; it takes a sec
      server.wait_until_succeeds("k3s kubectl get serviceaccount default")

      # Now create a pod on each node via a daemonset and verify they can talk to each other.
      server.succeed("k3s kubectl apply -f ${networkTestDaemonset}")
      server.wait_until_succeeds(f'[ "$(k3s kubectl get ds test -o json | jq .status.numberReady)" -eq {len(machines)} ]')

      # Get pod IPs
      pods = server.succeed("k3s kubectl get po -o json | jq '.items[].metadata.name' -r").splitlines()
      pod_ips = [server.succeed(f"k3s kubectl get po {name} -o json | jq '.status.podIP' -cr").strip() for name in pods]

      # Verify each server can ping each pod ip
      for pod_ip in pod_ips:
          server.succeed(f"ping -c 1 {pod_ip}")
          agent.succeed(f"ping -c 1 {pod_ip}")

      # Verify the pods can talk to each other
      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[0]} -- socat TCP:{pod_ips[1]}:8000 -")
      assert resp.strip() == "server"
      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[1]} -- socat TCP:{pod_ips[0]}:8000 -")
      assert resp.strip() == "server"

      # Cleanup
      server.succeed("k3s kubectl delete -f ${networkTestDaemonset}")

      for m in machines:
          m.shutdown()
    '';
  })
+4 −12
Original line number Diff line number Diff line
import ./make-test-python.nix ({ pkgs, ... }:

import ../make-test-python.nix ({ pkgs, ... }:
  let
    imageEnv = pkgs.buildEnv {
      name = "k3s-pause-image-env";
@@ -11,20 +10,12 @@ import ./make-test-python.nix ({ pkgs, ... }:
      contents = imageEnv;
      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
    };
    # Don't use the default service account because there's a race where it may
    # not be created yet; make our own instead.
    testPodYaml = pkgs.writeText "test.yml" ''
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: test
      ---
      apiVersion: v1
      kind: Pod
      metadata:
        name: test
      spec:
        serviceAccountName: test
        containers:
        - name: test
          image: test.local/pause:local
@@ -66,13 +57,14 @@ import ./make-test-python.nix ({ pkgs, ... }:
      machine.wait_for_unit("k3s")
      machine.succeed("k3s kubectl cluster-info")
      machine.fail("sudo -u noprivs k3s kubectl cluster-info")
      # FIXME: this fails with the current nixos kernel config; once it passes, we should uncomment it
      # machine.succeed("k3s check-config")
      machine.succeed("k3s check-config")

      machine.succeed(
          "${pauseImage} | k3s ctr image import -"
      )

      # Also wait for our service account to show up; it takes a sec
      machine.wait_until_succeeds("k3s kubectl get serviceaccount default")
      machine.succeed("k3s kubectl apply -f ${testPodYaml}")
      machine.succeed("k3s kubectl wait --for 'condition=Ready' pod/test")
      machine.succeed("k3s kubectl delete -f ${testPodYaml}")
+1 −1
Original line number Diff line number Diff line
@@ -323,7 +323,7 @@ buildGoModule rec {

  passthru.updateScript = ./update.sh;

  passthru.tests = { inherit (nixosTests) k3s-single-node; };
  passthru.tests = nixosTests.k3s;

  meta = baseMeta;
}