Diffstat (limited to 'nixpkgs/nixos/tests/kubernetes')
-rw-r--r--  nixpkgs/nixos/tests/kubernetes/base.nix     | 108
-rw-r--r--  nixpkgs/nixos/tests/kubernetes/default.nix  |   7
-rw-r--r--  nixpkgs/nixos/tests/kubernetes/dns.nix      | 151
-rw-r--r--  nixpkgs/nixos/tests/kubernetes/e2e.nix      |  40
-rw-r--r--  nixpkgs/nixos/tests/kubernetes/rbac.nix     | 164
5 files changed, 470 insertions, 0 deletions
diff --git a/nixpkgs/nixos/tests/kubernetes/base.nix b/nixpkgs/nixos/tests/kubernetes/base.nix
new file mode 100644
index 000000000000..8cfac10b6dc4
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/base.nix
@@ -0,0 +1,108 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; }
+}:
+
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  mkKubernetesBaseTest =
+    { name, domain ? "my.zyx", test, machines
+    , extraConfiguration ? null }:
+    let
+      masterName = head (filter (machineName: any (role: role == "master") machines.${machineName}.roles) (attrNames machines));
+      master = machines.${masterName};
+      extraHosts = ''
+        ${master.ip}  etcd.${domain}
+        ${master.ip}  api.${domain}
+        ${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip}  ${machineName}.${domain}") (attrNames machines)}
+      '';
+      kubectl = with pkgs; runCommand "wrap-kubectl" { buildInputs = [ makeWrapper ]; } ''
+        mkdir -p $out/bin
+        makeWrapper ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl --set KUBECONFIG "/etc/kubernetes/cluster-admin.kubeconfig"
+      '';
+    in makeTest {
+      inherit name;
+
+      nodes = mapAttrs (machineName: machine:
+        { config, pkgs, lib, nodes, ... }:
+          mkMerge [
+            {
+              boot.postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
+              virtualisation.memorySize = mkDefault 1536;
+              virtualisation.diskSize = mkDefault 4096;
+              networking = {
+                inherit domain extraHosts;
+                primaryIPAddress = mkForce machine.ip;
+
+                firewall = {
+                  allowedTCPPorts = [
+                    10250 # kubelet
+                  ];
+                  trustedInterfaces = ["docker0"];
+
+                  extraCommands = concatMapStrings (node: ''
+                    iptables -A INPUT -s ${node.config.networking.primaryIPAddress} -j ACCEPT
+                  '') (attrValues nodes);
+                };
+              };
+              programs.bash.enableCompletion = true;
+              environment.systemPackages = [ kubectl ];
+              services.flannel.iface = "eth1";
+              services.kubernetes = {
+                addons.dashboard.enable = true;
+                proxy.hostname = "${masterName}.${domain}";
+
+                easyCerts = true;
+                inherit (machine) roles;
+                apiserver = {
+                  securePort = 443;
+                  advertiseAddress = master.ip;
+                };
+                masterAddress = "${masterName}.${config.networking.domain}";
+              };
+            }
+            (optionalAttrs (any (role: role == "master") machine.roles) {
+              networking.firewall.allowedTCPPorts = [
+                443 # kubernetes apiserver
+              ];
+            })
+            (optionalAttrs (machine ? extraConfiguration) (machine.extraConfiguration { inherit config pkgs lib nodes; }))
+            (optionalAttrs (extraConfiguration != null) (extraConfiguration { inherit config pkgs lib nodes; }))
+          ]
+      ) machines;
+
+      testScript = ''
+        start_all()
+      '' + test;
+    };
+
+  mkKubernetesMultiNodeTest = attrs: mkKubernetesBaseTest ({
+    machines = {
+      machine1 = {
+        roles = ["master"];
+        ip = "192.168.1.1";
+      };
+      machine2 = {
+        roles = ["node"];
+        ip = "192.168.1.2";
+      };
+    };
+  } // attrs // {
+    name = "kubernetes-${attrs.name}-multinode";
+  });
+
+  mkKubernetesSingleNodeTest = attrs: mkKubernetesBaseTest ({
+    machines = {
+      machine1 = {
+        roles = ["master" "node"];
+        ip = "192.168.1.1";
+      };
+    };
+  } // attrs // {
+    name = "kubernetes-${attrs.name}-singlenode";
+  });
+in {
+  inherit mkKubernetesBaseTest mkKubernetesSingleNodeTest mkKubernetesMultiNodeTest;
+}
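
base.nix only exports the three helpers; dns.nix and rbac.nix below show the intended usage. As a minimal sketch (the machine names and IPs here are illustrative assumptions, not taken from the tests above), a custom topology can also be passed to mkKubernetesBaseTest directly:

    # Minimal sketch: a custom two-node cluster built with mkKubernetesBaseTest
    # instead of the single-/multi-node wrappers (names and IPs are assumptions).
    with import ./base.nix { system = builtins.currentSystem; };

    mkKubernetesBaseTest {
      name = "kubernetes-custom";
      domain = "my.zyx";
      machines = {
        master1 = { roles = [ "master" ]; ip = "192.168.1.1"; };
        worker1 = { roles = [ "node" ];   ip = "192.168.1.2"; };
      };
      test = ''
        # start_all() is already prepended by mkKubernetesBaseTest
        master1.wait_until_succeeds("kubectl get node master1.my.zyx | grep -w Ready")
      '';
    }
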
diff --git a/nixpkgs/nixos/tests/kubernetes/default.nix b/nixpkgs/nixos/tests/kubernetes/default.nix
new file mode 100644
index 000000000000..a801759bf582
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/default.nix
@@ -0,0 +1,7 @@
+{ system ? builtins.currentSystem }:
+{
+  dns = import ./dns.nix { inherit system; };
+  # e2e = import ./e2e.nix { inherit system; };  # TODO: make it pass
+  # the following test(s) can be removed when e2e is working:
+  rbac = import ./rbac.nix { inherit system; };
+}
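
default.nix simply aggregates the per-feature tests, so each attribute is an ordinary NixOS test derivation. A typical invocation, assuming a local nixpkgs checkout (in this repository the files live under the vendored nixpkgs/ prefix), would be:

    # build (and thereby run) the single-node DNS test and the multi-node RBAC test
    nix-build nixos/tests/kubernetes/default.nix -A dns.singlenode
    nix-build nixos/tests/kubernetes/rbac.nix -A multinode
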
diff --git a/nixpkgs/nixos/tests/kubernetes/dns.nix b/nixpkgs/nixos/tests/kubernetes/dns.nix
new file mode 100644
index 000000000000..b6cd811c5aef
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/dns.nix
@@ -0,0 +1,151 @@
+{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
+with import ./base.nix { inherit system; };
+let
+  domain = "my.zyx";
+
+  redisPod = pkgs.writeText "redis-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "redis";
+    metadata.labels.name = "redis";
+    spec.containers = [{
+      name = "redis";
+      image = "redis";
+      args = ["--bind" "0.0.0.0"];
+      imagePullPolicy = "Never";
+      ports = [{
+        name = "redis-server";
+        containerPort = 6379;
+      }];
+    }];
+  });
+
+  redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
+    kind = "Service";
+    apiVersion = "v1";
+    metadata.name = "redis";
+    spec = {
+      ports = [{port = 6379; targetPort = 6379;}];
+      selector = {name = "redis";};
+    };
+  });
+
+  redisImage = pkgs.dockerTools.buildImage {
+    name = "redis";
+    tag = "latest";
+    contents = [ pkgs.redis pkgs.bind.host ];
+    config.Entrypoint = ["/bin/redis-server"];
+  };
+
+  probePod = pkgs.writeText "probe-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "probe";
+    metadata.labels.name = "probe";
+    spec.containers = [{
+      name = "probe";
+      image = "probe";
+      args = [ "-f" ];
+      tty = true;
+      imagePullPolicy = "Never";
+    }];
+  });
+
+  probeImage = pkgs.dockerTools.buildImage {
+    name = "probe";
+    tag = "latest";
+    contents = [ pkgs.bind.host pkgs.busybox ];
+    config.Entrypoint = ["/bin/tail"];
+  };
+
+  extraConfiguration = { config, pkgs, lib, ... }: {
+    environment.systemPackages = [ pkgs.bind.host ];
+    services.dnsmasq.enable = true;
+    services.dnsmasq.servers = [
+      "/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
+    ];
+  };
+
+  base = {
+    name = "dns";
+    inherit domain extraConfiguration;
+  };
+
+  singleNodeTest = {
+    test = ''
+      # prepare machine1 for test
+      machine1.wait_until_succeeds("kubectl get node machine1.${domain} | grep -w Ready")
+      machine1.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${redisImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisPod}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisService}"
+      )
+      machine1.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${probeImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${probePod}"
+      )
+
+      # check if pods are running
+      machine1.wait_until_succeeds("kubectl get pod redis | grep Running")
+      machine1.wait_until_succeeds("kubectl get pod probe | grep Running")
+      machine1.wait_until_succeeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'")
+
+      # check dns on host (dnsmasq)
+      machine1.succeed("host redis.default.svc.cluster.local")
+
+      # check dns inside the container
+      machine1.succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local")
+    '';
+  };
+
+  multiNodeTest = {
+    test = ''
+      # Node token exchange
+      machine1.wait_until_succeeds(
+          "cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret"
+      )
+      machine2.wait_until_succeeds(
+          "cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join"
+      )
+
+      # prepare machines for test
+      machine1.wait_until_succeeds("kubectl get node machine2.${domain} | grep -w Ready")
+      machine2.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${redisImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisPod}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisService}"
+      )
+      machine2.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${probeImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${probePod}"
+      )
+
+      # check if pods are running
+      machine1.wait_until_succeeds("kubectl get pod redis | grep Running")
+      machine1.wait_until_succeeds("kubectl get pod probe | grep Running")
+      machine1.wait_until_succeeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'")
+
+      # check dns on hosts (dnsmasq)
+      machine1.succeed("host redis.default.svc.cluster.local")
+      machine2.succeed("host redis.default.svc.cluster.local")
+
+      # check dns inside the container
+      machine1.succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local")
+    '';
+  };
+in {
+  singlenode = mkKubernetesSingleNodeTest (base // singleNodeTest);
+  multinode = mkKubernetesMultiNodeTest (base // multiNodeTest);
+}
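
The extraConfiguration above makes each host forward cluster.local lookups to CoreDNS through dnsmasq, which is what lets the on-host "host redis.default.svc.cluster.local" checks succeed. The generated dnsmasq option is equivalent to the line below (the cluster IP is whatever addons.dns.clusterIp evaluates to, shown here as a placeholder):

    # dnsmasq option produced by the services.dnsmasq.servers entry above
    server=/cluster.local/<coredns-cluster-ip>#53
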
diff --git a/nixpkgs/nixos/tests/kubernetes/e2e.nix b/nixpkgs/nixos/tests/kubernetes/e2e.nix
new file mode 100644
index 000000000000..175d8413045e
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/e2e.nix
@@ -0,0 +1,40 @@
+{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
+with import ./base.nix { inherit system; };
+let
+  domain = "my.zyx";
+  certs = import ./certs.nix { externalDomain = domain; kubelets = ["machine1" "machine2"]; };
+  kubeconfig = pkgs.writeText "kubeconfig.json" (builtins.toJSON {
+    apiVersion = "v1";
+    kind = "Config";
+    clusters = [{
+      name = "local";
+      cluster.certificate-authority = "${certs.master}/ca.pem";
+      cluster.server = "https://api.${domain}";
+    }];
+    users = [{
+      name = "kubelet";
+      user = {
+        client-certificate = "${certs.admin}/admin.pem";
+        client-key = "${certs.admin}/admin-key.pem";
+      };
+    }];
+    contexts = [{
+      name = "kubelet-context";
+      context = {
+        cluster = "local";
+        user = "kubelet";
+      };
+    }];
+    current-context = "kubelet-context";
+  });
+
+  base = {
+    name = "e2e";
+    inherit domain certs;
+    test = ''
+      machine1.succeed(
+          "e2e.test -kubeconfig ${kubeconfig} -provider local -ginkgo.focus '\\[Conformance\\]' -ginkgo.skip '\\[Flaky\\]|\\[Serial\\]'"
+      )
+    '';
+  };
+in {
+  singlenode = mkKubernetesSingleNodeTest base;
+  multinode = mkKubernetesMultiNodeTest base;
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/rbac.nix b/nixpkgs/nixos/tests/kubernetes/rbac.nix
new file mode 100644
index 000000000000..3fc8ed0fbe38
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/rbac.nix
@@ -0,0 +1,164 @@
+{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
+with import ./base.nix { inherit system; };
+let
+
+  roServiceAccount = pkgs.writeText "ro-service-account.json" (builtins.toJSON {
+    kind = "ServiceAccount";
+    apiVersion = "v1";
+    metadata = {
+      name = "read-only";
+      namespace = "default";
+    };
+  });
+
+  roRoleBinding = pkgs.writeText "ro-role-binding.json" (builtins.toJSON {
+    apiVersion = "rbac.authorization.k8s.io/v1";
+    kind = "RoleBinding";
+    metadata = {
+      name = "read-pods";
+      namespace = "default";
+    };
+    roleRef = {
+      apiGroup = "rbac.authorization.k8s.io";
+      kind = "Role";
+      name = "pod-reader";
+    };
+    subjects = [{
+      kind = "ServiceAccount";
+      name = "read-only";
+      namespace = "default";
+    }];
+  });
+
+  roRole = pkgs.writeText "ro-role.json" (builtins.toJSON {
+    apiVersion = "rbac.authorization.k8s.io/v1";
+    kind = "Role";
+    metadata = {
+      name = "pod-reader";
+      namespace = "default";
+    };
+    rules = [{
+      apiGroups = [""];
+      resources = ["pods"];
+      verbs = ["get" "list" "watch"];
+    }];
+  });
+
+  kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "kubectl";
+    metadata.namespace = "default";
+    metadata.labels.name = "kubectl";
+    spec.serviceAccountName = "read-only";
+    spec.containers = [{
+      name = "kubectl";
+      image = "kubectl:latest";
+      command = ["/bin/tail" "-f"];
+      imagePullPolicy = "Never";
+      tty = true;
+    }];
+  });
+
+  kubectlPod2 = pkgs.writeTextDir "kubectl-pod-2.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "kubectl-2";
+    metadata.namespace = "default";
+    metadata.labels.name = "kubectl-2";
+    spec.serviceAccountName = "read-only";
+    spec.containers = [{
+      name = "kubectl-2";
+      image = "kubectl:latest";
+      command = ["/bin/tail" "-f"];
+      imagePullPolicy = "Never";
+      tty = true;
+    }];
+  });
+
+  kubectl = pkgs.runCommand "copy-kubectl" { buildInputs = [ pkgs.kubernetes ]; } ''
+    mkdir -p $out/bin
+    cp ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl
+  '';
+
+  kubectlImage = pkgs.dockerTools.buildImage {
+    name = "kubectl";
+    tag = "latest";
+    contents = [ kubectl pkgs.busybox kubectlPod2 ];
+    config.Entrypoint = ["/bin/sh"];
+  };
+
+  base = {
+    name = "rbac";
+  };
+
+  singlenode = base // {
+    test = ''
+      machine1.wait_until_succeeds("kubectl get node machine1.my.zyx | grep -w Ready")
+
+      machine1.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${kubectlImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roServiceAccount}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRole}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRoleBinding}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${kubectlPod}"
+      )
+
+      machine1.wait_until_succeeds("kubectl get pod kubectl | grep Running")
+
+      machine1.wait_until_succeeds("kubectl exec -ti kubectl -- kubectl get pods")
+      machine1.fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json")
+      machine1.fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl")
+    '';
+  };
+
+  multinode = base // {
+    test = ''
+      # Node token exchange
+      machine1.wait_until_succeeds(
+          "cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret"
+      )
+      machine2.wait_until_succeeds(
+          "cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join"
+      )
+
+      machine1.wait_until_succeeds("kubectl get node machine2.my.zyx | grep -w Ready")
+
+      machine2.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${kubectlImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roServiceAccount}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRole}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRoleBinding}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${kubectlPod}"
+      )
+
+      machine1.wait_until_succeeds("kubectl get pod kubectl | grep Running")
+
+      machine1.wait_until_succeeds("kubectl exec -ti kubectl -- kubectl get pods")
+      machine1.fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json")
+      machine1.fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl")
+    '';
+  };
+
+in {
+  singlenode = mkKubernetesSingleNodeTest singlenode;
+  multinode = mkKubernetesMultiNodeTest multinode;
+}