about summary refs log tree commit diff
path: root/nixpkgs/nixos/tests/kubernetes
diff options
context:
space:
mode:
author	Alyssa Ross <hi@alyssa.is>	2019-01-07 02:18:36 +0000
committer	Alyssa Ross <hi@alyssa.is>	2019-01-07 02:18:47 +0000
commit	36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2 (patch)
tree	b3faaf573407b32aa645237a4d16b82778a39a92 /nixpkgs/nixos/tests/kubernetes
parent	4e31070265257dc67d120c27e0f75c2344fdfa9a (diff)
parent	abf060725d7614bd3b9f96764262dfbc2f9c2199 (diff)
download	nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar.gz
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar.bz2
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar.lz
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar.xz
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.tar.zst
nixlib-36f56d99fa0a0765c9f1de4a5f17a9b05830c3f2.zip
Add 'nixpkgs/' from commit 'abf060725d7614bd3b9f96764262dfbc2f9c2199'
git-subtree-dir: nixpkgs
git-subtree-mainline: 4e31070265257dc67d120c27e0f75c2344fdfa9a
git-subtree-split: abf060725d7614bd3b9f96764262dfbc2f9c2199
Diffstat (limited to 'nixpkgs/nixos/tests/kubernetes')
-rw-r--r--	nixpkgs/nixos/tests/kubernetes/base.nix	115
-rw-r--r--	nixpkgs/nixos/tests/kubernetes/certs.nix	219
-rw-r--r--	nixpkgs/nixos/tests/kubernetes/default.nix	7
-rw-r--r--	nixpkgs/nixos/tests/kubernetes/dns.nix	127
-rw-r--r--	nixpkgs/nixos/tests/kubernetes/e2e.nix	40
-rw-r--r--	nixpkgs/nixos/tests/kubernetes/kubernetes-common.nix	57
-rw-r--r--	nixpkgs/nixos/tests/kubernetes/rbac.nix	137
7 files changed, 702 insertions, 0 deletions
diff --git a/nixpkgs/nixos/tests/kubernetes/base.nix b/nixpkgs/nixos/tests/kubernetes/base.nix
new file mode 100644
index 000000000000..9d77be131751
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/base.nix
@@ -0,0 +1,115 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; }
+}:
+
+with import ../../lib/testing.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  mkKubernetesBaseTest =
+    { name, domain ? "my.zyx", test, machines
+    , pkgs ? import <nixpkgs> { inherit system; }
+    , certs ? import ./certs.nix { inherit pkgs; externalDomain = domain; kubelets = attrNames machines; }
+    , extraConfiguration ? null }:
+    let
+      masterName = head (filter (machineName: any (role: role == "master") machines.${machineName}.roles) (attrNames machines));
+      master = machines.${masterName};
+      extraHosts = ''
+        ${master.ip}  etcd.${domain}
+        ${master.ip}  api.${domain}
+        ${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip}  ${machineName}.${domain}") (attrNames machines)}
+      '';
+    in makeTest {
+      inherit name;
+
+      nodes = mapAttrs (machineName: machine:
+        { config, pkgs, lib, nodes, ... }:
+          mkMerge [
+            {
+              virtualisation.memorySize = mkDefault 1536;
+              virtualisation.diskSize = mkDefault 4096;
+              networking = {
+                inherit domain extraHosts;
+                primaryIPAddress = mkForce machine.ip;
+
+                firewall = {
+                  allowedTCPPorts = [
+                    10250 # kubelet
+                  ];
+                  trustedInterfaces = ["docker0"];
+
+                  extraCommands = concatMapStrings  (node: ''
+                    iptables -A INPUT -s ${node.config.networking.primaryIPAddress} -j ACCEPT
+                  '') (attrValues nodes);
+                };
+              };
+              programs.bash.enableCompletion = true;
+              environment.variables = {
+                ETCDCTL_CERT_FILE = "${certs.worker}/etcd-client.pem";
+                ETCDCTL_KEY_FILE = "${certs.worker}/etcd-client-key.pem";
+                ETCDCTL_CA_FILE = "${certs.worker}/ca.pem";
+                ETCDCTL_PEERS = "https://etcd.${domain}:2379";
+              };
+              services.flannel.iface = "eth1";
+              services.kubernetes.apiserver.advertiseAddress = master.ip;
+            }
+            (optionalAttrs (any (role: role == "master") machine.roles) {
+              networking.firewall.allowedTCPPorts = [
+                2379 2380  # etcd
+                443 # kubernetes apiserver
+              ];
+              services.etcd = {
+                enable = true;
+                certFile = "${certs.master}/etcd.pem";
+                keyFile = "${certs.master}/etcd-key.pem";
+                trustedCaFile = "${certs.master}/ca.pem";
+                peerClientCertAuth = true;
+                listenClientUrls = ["https://0.0.0.0:2379"];
+                listenPeerUrls = ["https://0.0.0.0:2380"];
+                advertiseClientUrls = ["https://etcd.${config.networking.domain}:2379"];
+                initialCluster = ["${masterName}=https://etcd.${config.networking.domain}:2380"];
+                initialAdvertisePeerUrls = ["https://etcd.${config.networking.domain}:2380"];
+              };
+            })
+            (import ./kubernetes-common.nix { inherit (machine) roles; inherit pkgs config certs; })
+            (optionalAttrs (machine ? "extraConfiguration") (machine.extraConfiguration { inherit config pkgs lib nodes; }))
+            (optionalAttrs (extraConfiguration != null) (extraConfiguration { inherit config pkgs lib nodes; }))
+          ]
+      ) machines;
+
+      testScript = ''
+        startAll;
+
+        ${test}
+      '';
+    };
+
+  mkKubernetesMultiNodeTest = attrs: mkKubernetesBaseTest ({
+    machines = {
+      machine1 = {
+        roles = ["master"];
+        ip = "192.168.1.1";
+      };
+      machine2 = {
+        roles = ["node"];
+        ip = "192.168.1.2";
+      };
+    };
+  } // attrs // {
+    name = "kubernetes-${attrs.name}-multinode";
+  });
+
+  mkKubernetesSingleNodeTest = attrs: mkKubernetesBaseTest ({
+    machines = {
+      machine1 = {
+        roles = ["master" "node"];
+        ip = "192.168.1.1";
+      };
+    };
+  } // attrs // {
+    name = "kubernetes-${attrs.name}-singlenode";
+  });
+in {
+  inherit mkKubernetesBaseTest mkKubernetesSingleNodeTest mkKubernetesMultiNodeTest;
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/certs.nix b/nixpkgs/nixos/tests/kubernetes/certs.nix
new file mode 100644
index 000000000000..85e92f6330c9
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/certs.nix
@@ -0,0 +1,219 @@
+{
+  pkgs ? import <nixpkgs> {},
+  externalDomain ? "myawesomecluster.cluster.yourdomain.net",
+  serviceClusterIp ? "10.0.0.1",
+  kubelets,
+  ...
+}:
+let
+   runWithCFSSL = name: cmd:
+     let secrets = pkgs.runCommand "${name}-cfss.json" {
+         buildInputs = [ pkgs.cfssl pkgs.jq ];
+         outputs = [ "out" "cert" "key" "csr" ];
+       }
+       ''
+         (
+           echo "${cmd}"
+           cfssl ${cmd} > tmp
+           cat tmp | jq -r .key > $key
+           cat tmp | jq -r .cert > $cert
+           cat tmp | jq -r .csr > $csr
+
+           touch $out
+         ) 2>&1 | fold -w 80 -s
+       '';
+     in {
+       key = secrets.key;
+       cert = secrets.cert;
+       csr = secrets.csr;
+     };
+
+   writeCFSSL = content:
+     pkgs.runCommand content.name {
+      buildInputs = [ pkgs.cfssl pkgs.jq ];
+     } ''
+       mkdir -p $out
+       cd $out
+
+       json=${pkgs.lib.escapeShellArg (builtins.toJSON content)}
+
+       # for a given $field in the $json, treat the associated value as a
+       # file path and substitute the contents thereof into the $json
+       # object.
+       expandFileField() {
+         local field=$1
+         if jq -e --arg field "$field" 'has($field)'; then
+           local path="$(echo "$json" | jq -r ".$field")"
+           json="$(echo "$json" | jq --arg val "$(cat "$path")" ".$field = \$val")"
+         fi
+       }
+
+       expandFileField key
+       expandFileField ca
+       expandFileField cert
+
+       echo "$json" | cfssljson -bare ${content.name}
+     '';
+
+  noCSR = content: pkgs.lib.filterAttrs (n: v: n != "csr") content;
+  noKey = content: pkgs.lib.filterAttrs (n: v: n != "key") content;
+
+  writeFile = content:
+    if pkgs.lib.isDerivation content
+    then content
+    else pkgs.writeText "content" (builtins.toJSON content);
+
+  createServingCertKey = { ca, cn, hosts? [], size ? 2048, name ? cn }:
+    noCSR (
+      (runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=server -config=${writeFile ca.config} ${writeFile {
+        CN = cn;
+        hosts = hosts;
+        key = { algo = "rsa"; inherit size; };
+      }}") // { inherit name; }
+    );
+
+  createClientCertKey = { ca, cn, groups ? [], size ? 2048, name ? cn }:
+    noCSR (
+      (runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=client -config=${writeFile ca.config} ${writeFile {
+        CN = cn;
+        names = map (group: {O = group;}) groups;
+        hosts = [""];
+        key = { algo = "rsa"; inherit size; };
+      }}") // { inherit name; }
+    );
+
+  createSigningCertKey = { C ? "xx", ST ? "x", L ? "x", O ? "x", OU ? "x", CN ? "ca", emailAddress ? "x", expiry ? "43800h", size ? 2048, name ? CN }:
+    (noCSR (runWithCFSSL CN "genkey -initca ${writeFile {
+      key = { algo = "rsa"; inherit size; };
+      names = [{ inherit C ST L O OU CN emailAddress; }];
+    }}")) // {
+      inherit name;
+      config.signing = {
+        default.expiry = expiry;
+        profiles = {
+          server = {
+            inherit expiry;
+            usages = [
+              "signing"
+              "key encipherment"
+              "server auth"
+            ];
+          };
+          client = {
+            inherit expiry;
+            usages = [
+              "signing"
+              "key encipherment"
+              "client auth"
+            ];
+          };
+          peer = {
+            inherit expiry;
+            usages = [
+              "signing"
+              "key encipherment"
+              "server auth"
+              "client auth"
+            ];
+          };
+        };
+      };
+    };
+
+  ca = createSigningCertKey {};
+
+  kube-apiserver = createServingCertKey {
+    inherit ca;
+    cn = "kube-apiserver";
+    hosts = ["kubernetes.default" "kubernetes.default.svc" "localhost" "api.${externalDomain}" serviceClusterIp];
+  };
+
+  kubelet = createServingCertKey {
+    inherit ca;
+    cn = "kubelet";
+    hosts = ["*.${externalDomain}"];
+  };
+
+  service-accounts = createServingCertKey {
+    inherit ca;
+    cn = "kube-service-accounts";
+  };
+
+  etcd = createServingCertKey {
+    inherit ca;
+    cn = "etcd";
+    hosts = ["etcd.${externalDomain}"];
+  };
+
+  etcd-client = createClientCertKey {
+    inherit ca;
+    cn = "etcd-client";
+  };
+
+  kubelet-client = createClientCertKey {
+    inherit ca;
+    cn = "kubelet-client";
+    groups = ["system:masters"];
+  };
+
+  apiserver-client = {
+    kubelet = hostname: createClientCertKey {
+      inherit ca;
+      name = "apiserver-client-kubelet-${hostname}";
+      cn = "system:node:${hostname}.${externalDomain}";
+      groups = ["system:nodes"];
+    };
+
+    kube-proxy = createClientCertKey {
+      inherit ca;
+      name = "apiserver-client-kube-proxy";
+      cn = "system:kube-proxy";
+      groups = ["system:kube-proxy" "system:nodes"];
+    };
+
+    kube-controller-manager = createClientCertKey {
+      inherit ca;
+      name = "apiserver-client-kube-controller-manager";
+      cn = "system:kube-controller-manager";
+      groups = ["system:masters"];
+    };
+
+    kube-scheduler = createClientCertKey {
+      inherit ca;
+      name = "apiserver-client-kube-scheduler";
+      cn = "system:kube-scheduler";
+      groups = ["system:kube-scheduler"];
+    };
+
+    admin = createClientCertKey {
+      inherit ca;
+      cn = "admin";
+      groups = ["system:masters"];
+    };
+  };
+in {
+  master = pkgs.buildEnv {
+    name = "master-keys";
+    paths = [
+      (writeCFSSL (noKey ca))
+      (writeCFSSL kube-apiserver)
+      (writeCFSSL kubelet-client)
+      (writeCFSSL apiserver-client.kube-controller-manager)
+      (writeCFSSL apiserver-client.kube-scheduler)
+      (writeCFSSL service-accounts)
+      (writeCFSSL etcd)
+    ];
+  };
+
+  worker = pkgs.buildEnv {
+    name = "worker-keys";
+    paths = [
+      (writeCFSSL (noKey ca))
+      (writeCFSSL kubelet)
+      (writeCFSSL apiserver-client.kube-proxy)
+      (writeCFSSL etcd-client)
+    ] ++ map (hostname: writeCFSSL (apiserver-client.kubelet hostname)) kubelets;
+  };
+
+  admin = writeCFSSL apiserver-client.admin;
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/default.nix b/nixpkgs/nixos/tests/kubernetes/default.nix
new file mode 100644
index 000000000000..a801759bf582
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/default.nix
@@ -0,0 +1,7 @@
+{ system ? builtins.currentSystem }:
+{
+  dns = import ./dns.nix { inherit system; };
+  # e2e = import ./e2e.nix { inherit system; };  # TODO: make it pass
+  # the following test(s) can be removed when e2e is working:
+  rbac = import ./rbac.nix { inherit system; };
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/dns.nix b/nixpkgs/nixos/tests/kubernetes/dns.nix
new file mode 100644
index 000000000000..f25ea5b9ed84
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/dns.nix
@@ -0,0 +1,127 @@
+{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
+with import ./base.nix { inherit system; };
+let
+  domain = "my.zyx";
+
+  certs = import ./certs.nix { externalDomain = domain; kubelets = [ "machine1" "machine2" ]; };
+
+  redisPod = pkgs.writeText "redis-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "redis";
+    metadata.labels.name = "redis";
+    spec.containers = [{
+      name = "redis";
+      image = "redis";
+      args = ["--bind" "0.0.0.0"];
+      imagePullPolicy = "Never";
+      ports = [{
+        name = "redis-server";
+        containerPort = 6379;
+      }];
+    }];
+  });
+
+  redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
+    kind = "Service";
+    apiVersion = "v1";
+    metadata.name = "redis";
+    spec = {
+      ports = [{port = 6379; targetPort = 6379;}];
+      selector = {name = "redis";};
+    };
+  });
+
+  redisImage = pkgs.dockerTools.buildImage {
+    name = "redis";
+    tag = "latest";
+    contents = [ pkgs.redis pkgs.bind.host ];
+    config.Entrypoint = "/bin/redis-server";
+  };
+
+  probePod = pkgs.writeText "probe-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "probe";
+    metadata.labels.name = "probe";
+    spec.containers = [{
+      name = "probe";
+      image = "probe";
+      args = [ "-f" ];
+      tty = true;
+      imagePullPolicy = "Never";
+    }];
+  });
+
+  probeImage = pkgs.dockerTools.buildImage {
+    name = "probe";
+    tag = "latest";
+    contents = [ pkgs.bind.host pkgs.busybox ];
+    config.Entrypoint = "/bin/tail";
+  };
+
+  extraConfiguration = { config, pkgs, ... }: {
+    environment.systemPackages = [ pkgs.bind.host ];
+    # virtualisation.docker.extraOptions = "--dns=${config.services.kubernetes.addons.dns.clusterIp}";
+    services.dnsmasq.enable = true;
+    services.dnsmasq.servers = [
+      "/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
+    ];
+  };
+
+  base = {
+    name = "dns";
+    inherit domain certs extraConfiguration;
+  };
+
+  singleNodeTest = {
+    test = ''
+      # prepare machine1 for test
+      $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
+      $machine1->execute("docker load < ${redisImage}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
+      $machine1->execute("docker load < ${probeImage}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
+
+      # check if pods are running
+      $machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
+      $machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
+      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'");
+
+      # check dns on host (dnsmasq)
+      $machine1->succeed("host redis.default.svc.cluster.local");
+
+      # check dns inside the container
+      $machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
+    '';
+  };
+
+  multiNodeTest = {
+    test = ''
+      # prepare machines for test
+      $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
+      $machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
+      $machine2->execute("docker load < ${redisImage}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
+      $machine2->execute("docker load < ${probeImage}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
+
+      # check if pods are running
+      $machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
+      $machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
+      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'");
+
+      # check dns on hosts (dnsmasq)
+      $machine1->succeed("host redis.default.svc.cluster.local");
+      $machine2->succeed("host redis.default.svc.cluster.local");
+
+      # check dns inside the container
+      $machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
+    '';
+  };
+in {
+  singlenode = mkKubernetesSingleNodeTest (base // singleNodeTest);
+  multinode = mkKubernetesMultiNodeTest (base // multiNodeTest);
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/e2e.nix b/nixpkgs/nixos/tests/kubernetes/e2e.nix
new file mode 100644
index 000000000000..175d8413045e
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/e2e.nix
@@ -0,0 +1,40 @@
+{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
+with import ./base.nix { inherit system; };
+let
+  domain = "my.zyx";
+  certs = import ./certs.nix { externalDomain = domain; kubelets = ["machine1" "machine2"]; };
+  kubeconfig = pkgs.writeText "kubeconfig.json" (builtins.toJSON {
+    apiVersion = "v1";
+    kind = "Config";
+    clusters = [{
+      name = "local";
+      cluster.certificate-authority = "${certs.master}/ca.pem";
+      cluster.server = "https://api.${domain}";
+    }];
+    users = [{
+      name = "kubelet";
+      user = {
+        client-certificate = "${certs.admin}/admin.pem";
+        client-key = "${certs.admin}/admin-key.pem";
+      };
+    }];
+    contexts = [{
+      context = {
+        cluster = "local";
+        user = "kubelet";
+      };
+      current-context = "kubelet-context";
+    }];
+  });
+
+  base = {
+    name = "e2e";
+    inherit domain certs;
+    test = ''
+      $machine1->succeed("e2e.test -kubeconfig ${kubeconfig} -provider local -ginkgo.focus '\\[Conformance\\]' -ginkgo.skip '\\[Flaky\\]|\\[Serial\\]'");
+    '';
+  };
+in {
+  singlenode = mkKubernetesSingleNodeTest base;
+  multinode = mkKubernetesMultiNodeTest base;
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/kubernetes-common.nix b/nixpkgs/nixos/tests/kubernetes/kubernetes-common.nix
new file mode 100644
index 000000000000..87c65b883659
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/kubernetes-common.nix
@@ -0,0 +1,57 @@
+{ roles, config, pkgs, certs }:
+with pkgs.lib;
+let
+  base = {
+    inherit roles;
+    flannel.enable = true;
+    addons.dashboard.enable = true;
+
+    caFile = "${certs.master}/ca.pem";
+    apiserver = {
+      tlsCertFile = "${certs.master}/kube-apiserver.pem";
+      tlsKeyFile = "${certs.master}/kube-apiserver-key.pem";
+      kubeletClientCertFile = "${certs.master}/kubelet-client.pem";
+      kubeletClientKeyFile = "${certs.master}/kubelet-client-key.pem";
+      serviceAccountKeyFile = "${certs.master}/kube-service-accounts.pem";
+    };
+    etcd = {
+      servers = ["https://etcd.${config.networking.domain}:2379"];
+      certFile = "${certs.worker}/etcd-client.pem";
+      keyFile = "${certs.worker}/etcd-client-key.pem";
+    };
+    kubeconfig = {
+      server = "https://api.${config.networking.domain}";
+    };
+    kubelet = {
+      tlsCertFile = "${certs.worker}/kubelet.pem";
+      tlsKeyFile = "${certs.worker}/kubelet-key.pem";
+      hostname = "${config.networking.hostName}.${config.networking.domain}";
+      kubeconfig = {
+        certFile = "${certs.worker}/apiserver-client-kubelet-${config.networking.hostName}.pem";
+        keyFile = "${certs.worker}/apiserver-client-kubelet-${config.networking.hostName}-key.pem";
+      };
+    };
+    controllerManager = {
+      serviceAccountKeyFile = "${certs.master}/kube-service-accounts-key.pem";
+      kubeconfig = {
+        certFile = "${certs.master}/apiserver-client-kube-controller-manager.pem";
+        keyFile = "${certs.master}/apiserver-client-kube-controller-manager-key.pem";
+      };
+    };
+    scheduler = {
+      kubeconfig = {
+        certFile = "${certs.master}/apiserver-client-kube-scheduler.pem";
+        keyFile = "${certs.master}/apiserver-client-kube-scheduler-key.pem";
+      };
+    };
+    proxy = {
+      kubeconfig = {
+        certFile = "${certs.worker}/apiserver-client-kube-proxy.pem";
+        keyFile = "${certs.worker}/apiserver-client-kube-proxy-key.pem";
+      };
+    };
+  };
+
+in {
+  services.kubernetes = base;
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/rbac.nix b/nixpkgs/nixos/tests/kubernetes/rbac.nix
new file mode 100644
index 000000000000..226808c4b263
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/rbac.nix
@@ -0,0 +1,137 @@
+{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
+with import ./base.nix { inherit system; };
+let
+
+  roServiceAccount = pkgs.writeText "ro-service-account.json" (builtins.toJSON {
+    kind = "ServiceAccount";
+    apiVersion = "v1";
+    metadata = {
+      name = "read-only";
+      namespace = "default";
+    };
+  });
+
+  roRoleBinding = pkgs.writeText "ro-role-binding.json" (builtins.toJSON {
+    apiVersion = "rbac.authorization.k8s.io/v1";
+    kind = "RoleBinding";
+    metadata = {
+      name = "read-pods";
+      namespace = "default";
+    };
+    roleRef = {
+      apiGroup = "rbac.authorization.k8s.io";
+      kind = "Role";
+      name = "pod-reader";
+    };
+    subjects = [{
+      kind = "ServiceAccount";
+      name = "read-only";
+      namespace = "default";
+    }];
+  });
+
+  roRole = pkgs.writeText "ro-role.json" (builtins.toJSON {
+    apiVersion = "rbac.authorization.k8s.io/v1";
+    kind = "Role";
+    metadata = {
+      name = "pod-reader";
+      namespace = "default";
+    };
+    rules = [{
+      apiGroups = [""];
+      resources = ["pods"];
+      verbs = ["get" "list" "watch"];
+    }];
+  });
+
+  kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "kubectl";
+    metadata.namespace = "default";
+    metadata.labels.name = "kubectl";
+    spec.serviceAccountName = "read-only";
+    spec.containers = [{
+      name = "kubectl";
+      image = "kubectl:latest";
+      command = ["/bin/tail" "-f"];
+      imagePullPolicy = "Never";
+      tty = true;
+    }];
+  });
+
+  kubectlPod2 = pkgs.writeTextDir "kubectl-pod-2.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "kubectl-2";
+    metadata.namespace = "default";
+    metadata.labels.name = "kubectl-2";
+    spec.serviceAccountName = "read-only";
+    spec.containers = [{
+      name = "kubectl-2";
+      image = "kubectl:latest";
+      command = ["/bin/tail" "-f"];
+      imagePullPolicy = "Never";
+      tty = true;
+    }];
+  });
+
+  kubectl = pkgs.runCommand "copy-kubectl" { buildInputs = [ pkgs.kubernetes ]; } ''
+    mkdir -p $out/bin
+    cp ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl
+  '';
+
+  kubectlImage = pkgs.dockerTools.buildImage {
+    name = "kubectl";
+    tag = "latest";
+    contents = [ kubectl pkgs.busybox kubectlPod2 ];
+    config.Entrypoint = "/bin/sh";
+  };
+
+  base = {
+    name = "rbac";
+  };
+
+  singlenode = base // {
+    test = ''
+      $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
+
+      $machine1->execute("docker load < ${kubectlImage}");
+
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");
+
+      $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
+
+      $machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
+      $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
+      $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
+    '';
+  };
+
+  multinode = base // {
+    test = ''
+      $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
+      $machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");
+
+      $machine2->execute("docker load < ${kubectlImage}");
+
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
+      $machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
+      $machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");
+
+      $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
+
+      $machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
+      $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
+      $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
+    '';
+  };
+
+in {
+  singlenode = mkKubernetesSingleNodeTest singlenode;
+  multinode = mkKubernetesMultiNodeTest multinode;
+}