Diffstat (limited to 'nixpkgs/nixos/modules/services/cluster/kubernetes')
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/addon-manager.nix       242
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix    334
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dns.nix          334
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/apiserver.nix           491
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/controller-manager.nix  191
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/default.nix             317
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/flannel.nix             184
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix             426
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix                 407
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/proxy.nix               107
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/scheduler.nix           120
11 files changed, 3153 insertions(+), 0 deletions(-)
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/addon-manager.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/addon-manager.nix
new file mode 100644
index 000000000000..ad7d17c9c283
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/addon-manager.nix
@@ -0,0 +1,242 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.addonManager;
+
+  isRBACEnabled = elem "RBAC" top.apiserver.authorizationMode;
+
+  addons = pkgs.runCommand "kubernetes-addons" { } ''
+    mkdir -p $out
+    # since we are mounting the addons to the addon manager, they need to be copied
+    ${concatMapStringsSep ";" (a: "cp -v ${a}/* $out/") (mapAttrsToList (name: addon:
+      pkgs.writeTextDir "${name}.json" (builtins.toJSON addon)
+    ) (cfg.addons))}
+  '';
+in
+{
+  ###### interface
+  options.services.kubernetes.addonManager = with lib.types; {
+
+    bootstrapAddons = mkOption {
+      description = ''
+        Bootstrap addons are like regular addons, but they are applied with cluster-admin rights.
+        They are applied at addon-manager startup only.
+      '';
+      default = { };
+      type = attrsOf attrs;
+      example = literalExample ''
+        {
+          "my-service" = {
+            "apiVersion" = "v1";
+            "kind" = "Service";
+            "metadata" = {
+              "name" = "my-service";
+              "namespace" = "default";
+            };
+            "spec" = { ... };
+          };
+        }
+      '';
+    };
+
+    addons = mkOption {
+      description = "Kubernetes addons (any kind of Kubernetes resource can be an addon).";
+      default = { };
+      type = attrsOf (either attrs (listOf attrs));
+      example = literalExample ''
+        {
+          "my-service" = {
+            "apiVersion" = "v1";
+            "kind" = "Service";
+            "metadata" = {
+              "name" = "my-service";
+              "namespace" = "default";
+            };
+            "spec" = { ... };
+          };
+        }
+        // import <nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix> { cfg = config.services.kubernetes; };
+      '';
+    };
+
+    enable = mkEnableOption "Kubernetes addon manager";
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager";
+    bootstrapAddonsKubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager bootstrap";
+  };
+
+  ###### implementation
+  config = let
+
+    addonManagerPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+    ];
+    bootstrapAddonsPaths = filter (a: a != null) [
+      cfg.bootstrapAddonsKubeconfig.caFile
+      cfg.bootstrapAddonsKubeconfig.certFile
+      cfg.bootstrapAddonsKubeconfig.keyFile
+    ];
+
+  in mkIf cfg.enable {
+    environment.etc."kubernetes/addons".source = "${addons}/";
+
+    #TODO: Get rid of kube-addon-manager in the future for the following reasons
+    # - it is basically just a shell script wrapped around kubectl
+    # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
+    # - it is designed to be used with k8s system components only
+    # - it would be better with a more Nix-oriented way of managing addons
+    systemd.services.kube-addon-manager = {
+      description = "Kubernetes addon manager";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-node-online.target" ];
+      before = [ "kubernetes.target" ];
+      environment = {
+        ADDON_PATH = "/etc/kubernetes/addons/";
+        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager" cfg.kubeconfig;
+      };
+      path = with pkgs; [ gawk kubectl ];
+      preStart = ''
+        until kubectl -n kube-system get serviceaccounts/default 2>/dev/null; do
+          echo kubectl -n kube-system get serviceaccounts/default: exit status $?
+          sleep 2
+        done
+      '';
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = "${top.package}/bin/kube-addons";
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+        Restart = "on-failure";
+        RestartSec = 10;
+      };
+      unitConfig.ConditionPathExists = addonManagerPaths;
+    };
+
+    systemd.paths.kube-addon-manager = {
+      wantedBy = [ "kube-addon-manager.service" ];
+      pathConfig = {
+        PathExists = addonManagerPaths;
+        PathChanged = addonManagerPaths;
+      };
+    };
+
+    services.kubernetes.addonManager.kubeconfig.server = mkDefault top.apiserverAddress;
+
+    systemd.services.kube-addon-manager-bootstrap = mkIf (top.apiserver.enable && top.addonManager.bootstrapAddons != {}) {
+      wantedBy = [ "kube-control-plane-online.target" ];
+      after = [ "kube-apiserver.service" ];
+      before = [ "kube-control-plane-online.target" ];
+      path = [ pkgs.kubectl ];
+      environment = {
+        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager-bootstrap" cfg.bootstrapAddonsKubeconfig;
+      };
+      preStart = with pkgs; let
+        files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
+          cfg.bootstrapAddons;
+      in ''
+        until kubectl auth can-i '*' '*' -q 2>/dev/null; do
+          echo kubectl auth can-i '*' '*': exit status $?
+          sleep 2
+        done
+
+        kubectl apply -f ${concatStringsSep " \\\n -f " files}
+      '';
+      script = "echo Ok";
+      unitConfig.ConditionPathExists = bootstrapAddonsPaths;
+    };
+
+    systemd.paths.kube-addon-manager-bootstrap = {
+      wantedBy = [ "kube-addon-manager-bootstrap.service" ];
+      pathConfig = {
+        PathExists = bootstrapAddonsPaths;
+        PathChanged = bootstrapAddonsPaths;
+      };
+    };
+
+    services.kubernetes.addonManager.bootstrapAddonsKubeconfig.server = mkDefault top.apiserverAddress;
+
+    services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
+    (let
+      name = "system:kube-addon-manager";
+      namespace = "kube-system";
+    in
+    {
+
+      kube-addon-manager-r = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "Role";
+        metadata = {
+          inherit name namespace;
+        };
+        rules = [{
+          apiGroups = ["*"];
+          resources = ["*"];
+          verbs = ["*"];
+        }];
+      };
+
+      kube-addon-manager-rb = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "RoleBinding";
+        metadata = {
+          inherit name namespace;
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "Role";
+          inherit name;
+        };
+        subjects = [{
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "User";
+          inherit name;
+        }];
+      };
+
+      kube-addon-manager-cluster-lister-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRole";
+        metadata = {
+          name = "${name}:cluster-lister";
+        };
+        rules = [{
+          apiGroups = ["*"];
+          resources = ["*"];
+          verbs = ["list"];
+        }];
+      };
+
+      kube-addon-manager-cluster-lister-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRoleBinding";
+        metadata = {
+          name = "${name}:cluster-lister";
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "${name}:cluster-lister";
+        };
+        subjects = [{
+          kind = "User";
+          inherit name;
+        }];
+      };
+    });
+
+    services.kubernetes.pki.certs = {
+      addonManager = top.lib.mkCert {
+        name = "kube-addon-manager";
+        CN = "system:kube-addon-manager";
+        action = "systemctl restart kube-addon-manager.service";
+      };
+    };
+  };
+
+}
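For context, a minimal sketch of how the options above might be set from a NixOS
configuration; the addon names and resource bodies are illustrative, not part of
the module:

    services.kubernetes.addonManager = {
      enable = true;
      # Regular addons are reconciled continuously by kube-addons.
      addons."my-service" = {
        apiVersion = "v1";
        kind = "Service";
        metadata = { name = "my-service"; namespace = "default"; };
        spec = {
          selector.app = "my-app";
          ports = [ { port = 80; targetPort = 8080; } ];
        };
      };
      # Bootstrap addons are applied once at startup, with cluster-admin rights.
      bootstrapAddons."my-namespace" = {
        apiVersion = "v1";
        kind = "Namespace";
        metadata.name = "my-namespace";
      };
    };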
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
new file mode 100644
index 000000000000..2295694ffc74
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
@@ -0,0 +1,334 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.kubernetes.addons.dashboard;
+in {
+  options.services.kubernetes.addons.dashboard = {
+    enable = mkEnableOption "kubernetes dashboard addon";
+
+    extraArgs = mkOption {
+      description = "Extra arguments to append to the dashboard cmdline";
+      type = types.listOf types.str;
+      default = [];
+      example = ["--enable-skip-login"];
+    };
+
+    rbac = mkOption {
+      description = "Role-based access control (RBAC) options";
+      default = {};
+      type = types.submodule {
+        options = {
+          enable = mkOption {
+            description = "Whether to enable role based access control is enabled for kubernetes dashboard";
+            type = types.bool;
+            default = elem "RBAC" config.services.kubernetes.apiserver.authorizationMode;
+          };
+
+          clusterAdmin = mkOption {
+            description = "Whether to assign cluster admin rights to the kubernetes dashboard";
+            type = types.bool;
+            default = false;
+          };
+        };
+      };
+    };
+
+    version = mkOption {
+      description = "Which version of the kubernetes dashboard to deploy";
+      type = types.str;
+      default = "v1.10.1";
+    };
+
+    image = mkOption {
+      description = "Docker image to seed for the kubernetes dashboard container.";
+      type = types.attrs;
+      default = {
+        imageName = "k8s.gcr.io/kubernetes-dashboard-amd64";
+        imageDigest = "sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747";
+        finalImageTag = cfg.version;
+        sha256 = "01xrr4pwgr2hcjrjsi3d14ifpzdfbxzqpzxbk2fkbjb9zkv38zxy";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.kubernetes.kubelet.seedDockerImages = [(pkgs.dockerTools.pullImage cfg.image)];
+
+    services.kubernetes.addonManager.addons = {
+      kubernetes-dashboard-deployment = {
+        kind = "Deployment";
+        apiVersion = "apps/v1";
+        metadata = {
+          labels = {
+            k8s-addon = "kubernetes-dashboard.addons.k8s.io";
+            k8s-app = "kubernetes-dashboard";
+            version = cfg.version;
+            "kubernetes.io/cluster-service" = "true";
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+          };
+          name = "kubernetes-dashboard";
+          namespace = "kube-system";
+        };
+        spec = {
+          replicas = 1;
+          revisionHistoryLimit = 10;
+          selector.matchLabels."k8s-app" = "kubernetes-dashboard";
+          template = {
+            metadata = {
+              labels = {
+                k8s-addon = "kubernetes-dashboard.addons.k8s.io";
+                k8s-app = "kubernetes-dashboard";
+                version = cfg.version;
+                "kubernetes.io/cluster-service" = "true";
+              };
+              annotations = {
+                "scheduler.alpha.kubernetes.io/critical-pod" = "";
+              };
+            };
+            spec = {
+              priorityClassName = "system-cluster-critical";
+              containers = [{
+                name = "kubernetes-dashboard";
+                image = with cfg.image; "${imageName}:${finalImageTag}";
+                ports = [{
+                  containerPort = 8443;
+                  protocol = "TCP";
+                }];
+                resources = {
+                  limits = {
+                    cpu = "100m";
+                    memory = "300Mi";
+                  };
+                  requests = {
+                    cpu = "100m";
+                    memory = "100Mi";
+                  };
+                };
+                args = ["--auto-generate-certificates"] ++ cfg.extraArgs;
+                volumeMounts = [{
+                  name = "tmp-volume";
+                  mountPath = "/tmp";
+                } {
+                  name = "kubernetes-dashboard-certs";
+                  mountPath = "/certs";
+                }];
+                livenessProbe = {
+                  httpGet = {
+                    scheme = "HTTPS";
+                    path = "/";
+                    port = 8443;
+                  };
+                  initialDelaySeconds = 30;
+                  timeoutSeconds = 30;
+                };
+              }];
+              volumes = [{
+                name = "kubernetes-dashboard-certs";
+                secret = {
+                  secretName = "kubernetes-dashboard-certs";
+                };
+              } {
+                name = "tmp-volume";
+                emptyDir = {};
+              }];
+              serviceAccountName = "kubernetes-dashboard";
+              tolerations = [{
+                key = "node-role.kubernetes.io/master";
+                effect = "NoSchedule";
+              } {
+                key = "CriticalAddonsOnly";
+                operator = "Exists";
+              }];
+            };
+          };
+        };
+      };
+
+      kubernetes-dashboard-svc = {
+        apiVersion = "v1";
+        kind = "Service";
+        metadata = {
+          labels = {
+            k8s-addon = "kubernetes-dashboard.addons.k8s.io";
+            k8s-app = "kubernetes-dashboard";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/name" = "KubeDashboard";
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+          };
+          name = "kubernetes-dashboard";
+          namespace = "kube-system";
+        };
+        spec = {
+          ports = [{
+            port = 443;
+            targetPort = 8443;
+          }];
+          selector.k8s-app = "kubernetes-dashboard";
+        };
+      };
+
+      kubernetes-dashboard-cm = {
+        apiVersion = "v1";
+        kind = "ConfigMap";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
+          };
+          name = "kubernetes-dashboard-settings";
+          namespace = "kube-system";
+        };
+      };
+    };
+
+    services.kubernetes.addonManager.bootstrapAddons = mkMerge [{
+
+      kubernetes-dashboard-sa = {
+        apiVersion = "v1";
+        kind = "ServiceAccount";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            k8s-addon = "kubernetes-dashboard.addons.k8s.io";
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+          };
+          name = "kubernetes-dashboard";
+          namespace = "kube-system";
+        };
+      };
+      kubernetes-dashboard-sec-certs = {
+        apiVersion = "v1";
+        kind = "Secret";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
+          };
+          name = "kubernetes-dashboard-certs";
+          namespace = "kube-system";
+        };
+        type = "Opaque";
+      };
+      kubernetes-dashboard-sec-kholder = {
+        apiVersion = "v1";
+        kind = "Secret";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
+          };
+          name = "kubernetes-dashboard-key-holder";
+          namespace = "kube-system";
+        };
+        type = "Opaque";
+      };
+    }
+
+    (optionalAttrs cfg.rbac.enable
+      (let
+        subjects = [{
+          kind = "ServiceAccount";
+          name = "kubernetes-dashboard";
+          namespace = "kube-system";
+        }];
+        labels = {
+          k8s-app = "kubernetes-dashboard";
+          k8s-addon = "kubernetes-dashboard.addons.k8s.io";
+          "addonmanager.kubernetes.io/mode" = "Reconcile";
+        };
+      in
+        (if cfg.rbac.clusterAdmin then {
+          kubernetes-dashboard-crb = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "ClusterRoleBinding";
+            metadata = {
+              name = "kubernetes-dashboard";
+              inherit labels;
+            };
+            roleRef = {
+              apiGroup = "rbac.authorization.k8s.io";
+              kind = "ClusterRole";
+              name = "cluster-admin";
+            };
+            inherit subjects;
+          };
+        }
+        else
+        {
+          # Upstream role- and rolebinding as per:
+          # https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/alternative/kubernetes-dashboard.yaml
+          kubernetes-dashboard-role = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "Role";
+            metadata = {
+              name = "kubernetes-dashboard-minimal";
+              namespace = "kube-system";
+              inherit labels;
+            };
+            rules = [
+              # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
+              {
+                apiGroups = [""];
+                resources = ["secrets"];
+                verbs = ["create"];
+              }
+              # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
+              {
+                apiGroups = [""];
+                resources = ["configmaps"];
+                verbs = ["create"];
+              }
+              # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
+              {
+                apiGroups = [""];
+                resources = ["secrets"];
+                resourceNames = ["kubernetes-dashboard-key-holder"];
+                verbs = ["get" "update" "delete"];
+              }
+              # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
+              {
+                apiGroups = [""];
+                resources = ["configmaps"];
+                resourceNames = ["kubernetes-dashboard-settings"];
+                verbs = ["get" "update"];
+              }
+              # Allow Dashboard to get metrics from heapster.
+              {
+                apiGroups = [""];
+                resources = ["services"];
+                resourceNames = ["heapster"];
+                verbs = ["proxy"];
+              }
+              {
+                apiGroups = [""];
+                resources = ["services/proxy"];
+                resourceNames = ["heapster" "http:heapster:" "https:heapster:"];
+                verbs = ["get"];
+              }
+            ];
+          };
+
+          kubernetes-dashboard-rb = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "RoleBinding";
+            metadata = {
+              name = "kubernetes-dashboard-minimal";
+              namespace = "kube-system";
+              inherit labels;
+            };
+            roleRef = {
+              apiGroup = "rbac.authorization.k8s.io";
+              kind = "Role";
+              name = "kubernetes-dashboard-minimal";
+            };
+            inherit subjects;
+          };
+        })
+    ))];
+  };
+}
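A minimal usage sketch for the options above, assuming a cluster with RBAC
enabled (the apiserver default); --enable-skip-login is taken from the option's
own example:

    services.kubernetes.addons.dashboard = {
      enable = true;
      extraArgs = [ "--enable-skip-login" ];
      # Keep the default minimal Role/RoleBinding instead of cluster-admin:
      rbac.clusterAdmin = false;
    };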
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dns.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dns.nix
new file mode 100644
index 000000000000..4368159ea6e3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dns.nix
@@ -0,0 +1,334 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  version = "1.3.1";
+  cfg = config.services.kubernetes.addons.dns;
+  ports = {
+    dns = 10053;
+    health = 10054;
+    metrics = 10055;
+  };
+in {
+  options.services.kubernetes.addons.dns = {
+    enable = mkEnableOption "kubernetes dns addon";
+
+    clusterIp = mkOption {
+      description = "Dns addon clusterIP";
+
+      # this default is also what kubernetes uses
+      default = (
+        concatStringsSep "." (
+          take 3 (splitString "." config.services.kubernetes.apiserver.serviceClusterIpRange
+        ))
+      ) + ".254";
+      type = types.str;
+    };
+
+    clusterDomain = mkOption {
+      description = "Dns cluster domain";
+      default = "cluster.local";
+      type = types.str;
+    };
+
+    replicas = mkOption {
+      description = "Number of DNS pod replicas to deploy in the cluster.";
+      default = 2;
+      type = types.int;
+    };
+
+    reconcileMode = mkOption {
+      description = ''
+        Controls the addon manager reconciliation mode for the DNS addon.
+
+        Setting reconcile mode to EnsureExists makes it possible to tailor DNS behavior by editing the coredns ConfigMap.
+
+        See: <link xlink:href="https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/addon-manager/README.md"/>.
+      '';
+      default = "Reconcile";
+      type = types.enum [ "Reconcile" "EnsureExists" ];
+    };
+
+    coredns = mkOption {
+      description = "Docker image to seed for the CoreDNS container.";
+      type = types.attrs;
+      default = {
+        imageName = "coredns/coredns";
+        imageDigest = "sha256:02382353821b12c21b062c59184e227e001079bb13ebd01f9d3270ba0fcbf1e4";
+        finalImageTag = version;
+        sha256 = "0vbylgyxv2jm2mnzk6f28jbsj305zsxmx3jr6ngjq461czcl5fi5";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.kubernetes.kubelet.seedDockerImages =
+      singleton (pkgs.dockerTools.pullImage cfg.coredns);
+
+    services.kubernetes.addonManager.bootstrapAddons = {
+      coredns-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRole";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/bootstrapping" = "rbac-defaults";
+          };
+          name = "system:coredns";
+        };
+        rules = [
+          {
+            apiGroups = [ "" ];
+            resources = [ "endpoints" "services" "pods" "namespaces" ];
+            verbs = [ "list" "watch" ];
+          }
+          {
+            apiGroups = [ "" ];
+            resources = [ "nodes" ];
+            verbs = [ "get" ];
+          }
+        ];
+      };
+
+      coredns-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRoleBinding";
+        metadata = {
+          annotations = {
+            "rbac.authorization.kubernetes.io/autoupdate" = "true";
+          };
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/bootstrapping" = "rbac-defaults";
+          };
+          name = "system:coredns";
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "system:coredns";
+        };
+        subjects = [
+          {
+            kind = "ServiceAccount";
+            name = "coredns";
+            namespace = "kube-system";
+          }
+        ];
+      };
+    };
+
+    services.kubernetes.addonManager.addons = {
+      coredns-sa = {
+        apiVersion = "v1";
+        kind = "ServiceAccount";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+      };
+
+      coredns-cm = {
+        apiVersion = "v1";
+        kind = "ConfigMap";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = cfg.reconcileMode;
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+        data = {
+          Corefile = ".:${toString ports.dns} {
+            errors
+            health :${toString ports.health}
+            kubernetes ${cfg.clusterDomain} in-addr.arpa ip6.arpa {
+              pods insecure
+              upstream
+              fallthrough in-addr.arpa ip6.arpa
+            }
+            prometheus :${toString ports.metrics}
+            proxy . /etc/resolv.conf
+            cache 30
+            loop
+            reload
+            loadbalance
+          }";
+        };
+      };
+
+      coredns-deploy = {
+        apiVersion = "extensions/v1beta1";
+        kind = "Deployment";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = cfg.reconcileMode;
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/name" = "CoreDNS";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+        spec = {
+          replicas = cfg.replicas;
+          selector = {
+            matchLabels = { k8s-app = "kube-dns"; };
+          };
+          strategy = {
+            rollingUpdate = { maxUnavailable = 1; };
+            type = "RollingUpdate";
+          };
+          template = {
+            metadata = {
+              labels = {
+                k8s-app = "kube-dns";
+              };
+            };
+            spec = {
+              containers = [
+                {
+                  args = [ "-conf" "/etc/coredns/Corefile" ];
+                  image = with cfg.coredns; "${imageName}:${finalImageTag}";
+                  imagePullPolicy = "Never";
+                  livenessProbe = {
+                    failureThreshold = 5;
+                    httpGet = {
+                      path = "/health";
+                      port = ports.health;
+                      scheme = "HTTP";
+                    };
+                    initialDelaySeconds = 60;
+                    successThreshold = 1;
+                    timeoutSeconds = 5;
+                  };
+                  name = "coredns";
+                  ports = [
+                    {
+                      containerPort = ports.dns;
+                      name = "dns";
+                      protocol = "UDP";
+                    }
+                    {
+                      containerPort = ports.dns;
+                      name = "dns-tcp";
+                      protocol = "TCP";
+                    }
+                    {
+                      containerPort = ports.metrics;
+                      name = "metrics";
+                      protocol = "TCP";
+                    }
+                  ];
+                  resources = {
+                    limits = {
+                      memory = "170Mi";
+                    };
+                    requests = {
+                      cpu = "100m";
+                      memory = "70Mi";
+                    };
+                  };
+                  securityContext = {
+                    allowPrivilegeEscalation = false;
+                    capabilities = {
+                      drop = [ "all" ];
+                    };
+                    readOnlyRootFilesystem = true;
+                  };
+                  volumeMounts = [
+                    {
+                      mountPath = "/etc/coredns";
+                      name = "config-volume";
+                      readOnly = true;
+                    }
+                  ];
+                }
+              ];
+              dnsPolicy = "Default";
+              nodeSelector = {
+                "beta.kubernetes.io/os" = "linux";
+              };
+              serviceAccountName = "coredns";
+              tolerations = [
+                {
+                  effect = "NoSchedule";
+                  key = "node-role.kubernetes.io/master";
+                }
+                {
+                  key = "CriticalAddonsOnly";
+                  operator = "Exists";
+                }
+              ];
+              volumes = [
+                {
+                  configMap = {
+                    items = [
+                      {
+                        key = "Corefile";
+                        path = "Corefile";
+                      }
+                    ];
+                    name = "coredns";
+                  };
+                  name = "config-volume";
+                }
+              ];
+            };
+          };
+        };
+      };
+
+      coredns-svc = {
+        apiVersion = "v1";
+        kind = "Service";
+        metadata = {
+          annotations = {
+            "prometheus.io/port" = toString ports.metrics;
+            "prometheus.io/scrape" = "true";
+          };
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/name" = "CoreDNS";
+          };
+          name = "kube-dns";
+          namespace = "kube-system";
+        };
+        spec = {
+          clusterIP = cfg.clusterIp;
+          ports = [
+            {
+              name = "dns";
+              port = 53;
+              targetPort = ports.dns;
+              protocol = "UDP";
+            }
+            {
+              name = "dns-tcp";
+              port = 53;
+              targetPort = ports.dns;
+              protocol = "TCP";
+            }
+          ];
+          selector = { k8s-app = "kube-dns"; };
+        };
+      };
+    };
+
+    services.kubernetes.kubelet.clusterDns = mkDefault cfg.clusterIp;
+  };
+}
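A usage sketch; the values shown are the module defaults, spelled out for
illustration:

    services.kubernetes.addons.dns = {
      enable = true;
      clusterDomain = "cluster.local";
      replicas = 2;
      # Set to "EnsureExists" to allow hand-editing the coredns ConfigMap:
      reconcileMode = "Reconcile";
    };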
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/apiserver.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/apiserver.nix
new file mode 100644
index 000000000000..0c04648355b4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/apiserver.nix
@@ -0,0 +1,491 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.apiserver;
+
+  isRBACEnabled = elem "RBAC" cfg.authorizationMode;
+
+  apiserverServiceIP = (concatStringsSep "." (
+    take 3 (splitString "." cfg.serviceClusterIpRange
+  )) + ".1");
+in
+{
+  ###### interface
+  options.services.kubernetes.apiserver = with lib.types; {
+
+    advertiseAddress = mkOption {
+      description = ''
+        Kubernetes apiserver IP address on which to advertise the apiserver
+        to members of the cluster. This address must be reachable by the rest
+        of the cluster.
+      '';
+      default = null;
+      type = nullOr str;
+    };
+
+    allowPrivileged = mkOption {
+      description = "Whether to allow privileged containers on Kubernetes.";
+      default = false;
+      type = bool;
+    };
+
+    authorizationMode = mkOption {
+      description = ''
+        Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
+      '';
+      default = ["RBAC" "Node"]; # Enabling RBAC by default, although kubernetes default is AllowAllow
+      type = listOf (enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
+    };
+
+    authorizationPolicy = mkOption {
+      description = ''
+        Kubernetes apiserver authorization policy file. See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
+      '';
+      default = [];
+      type = listOf attrs;
+    };
+
+    basicAuthFile = mkOption {
+      description = ''
+        Kubernetes apiserver basic authentication file. See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    bindAddress = mkOption {
+      description = ''
+        The IP address on which to listen for the --secure-port port.
+        The associated interface(s) must be reachable by the rest
+        of the cluster, and by CLI/web clients.
+      '';
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    clientCaFile = mkOption {
+      description = "Kubernetes apiserver CA file for client auth.";
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    disableAdmissionPlugins = mkOption {
+      description = ''
+        Kubernetes admission control plugins to disable. See
+        <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
+      '';
+      default = [];
+      type = listOf str;
+    };
+
+    enable = mkEnableOption "Kubernetes apiserver";
+
+    enableAdmissionPlugins = mkOption {
+      description = ''
+        Kubernetes admission control plugins to enable. See
+        <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
+      '';
+      default = [
+        "NamespaceLifecycle" "LimitRanger" "ServiceAccount"
+        "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds"
+        "NodeRestriction"
+      ];
+      example = [
+        "NamespaceLifecycle" "NamespaceExists" "LimitRanger"
+        "SecurityContextDeny" "ServiceAccount" "ResourceQuota"
+        "PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass"
+      ];
+      type = listOf str;
+    };
+
+    etcd = {
+      servers = mkOption {
+        description = "List of etcd servers.";
+        default = ["http://127.0.0.1:2379"];
+        type = types.listOf types.str;
+      };
+
+      keyFile = mkOption {
+        description = "Etcd key file.";
+        default = null;
+        type = types.nullOr types.path;
+      };
+
+      certFile = mkOption {
+        description = "Etcd cert file.";
+        default = null;
+        type = types.nullOr types.path;
+      };
+
+      caFile = mkOption {
+        description = "Etcd ca file.";
+        default = top.caFile;
+        type = types.nullOr types.path;
+      };
+    };
+
+    extraOpts = mkOption {
+      description = "Kubernetes apiserver extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    extraSANs = mkOption {
+      description = "Extra x509 Subject Alternative Names to be added to the kubernetes apiserver tls cert.";
+      default = [];
+      type = listOf str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    insecureBindAddress = mkOption {
+      description = "The IP address on which to serve the --insecure-port.";
+      default = "127.0.0.1";
+      type = str;
+    };
+
+    insecurePort = mkOption {
+      description = "Kubernetes apiserver insecure listening port. (0 = disabled)";
+      default = 0;
+      type = int;
+    };
+
+    kubeletClientCaFile = mkOption {
+      description = "Path to a cert file for connecting to kubelet.";
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    kubeletClientCertFile = mkOption {
+      description = "Client certificate to use for connections to kubelet.";
+      default = null;
+      type = nullOr path;
+    };
+
+    kubeletClientKeyFile = mkOption {
+      description = "Key to use for connections to kubelet.";
+      default = null;
+      type = nullOr path;
+    };
+
+    kubeletHttps = mkOption {
+      description = "Whether to use https for connections to kubelet.";
+      default = true;
+      type = bool;
+    };
+
+    proxyClientCertFile = mkOption {
+      description = "Client certificate to use for connections to proxy.";
+      default = null;
+      type = nullOr path;
+    };
+
+    proxyClientKeyFile = mkOption {
+      description = "Key to use for connections to proxy.";
+      default = null;
+      type = nullOr path;
+    };
+
+    runtimeConfig = mkOption {
+      description = ''
+        Api runtime configuration. See
+        <link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
+      '';
+      default = "authentication.k8s.io/v1beta1=true";
+      example = "api/all=false,api/v1=true";
+      type = str;
+    };
+
+    storageBackend = mkOption {
+      description = ''
+        Kubernetes apiserver storage backend.
+      '';
+      default = "etcd3";
+      type = enum ["etcd2" "etcd3"];
+    };
+
+    securePort = mkOption {
+      description = "Kubernetes apiserver secure port.";
+      default = 6443;
+      type = int;
+    };
+
+    serviceAccountKeyFile = mkOption {
+      description = ''
+        Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
+        used to verify ServiceAccount tokens. By default, the tls private key file
+        is used.
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    serviceClusterIpRange = mkOption {
+      description = ''
+        A CIDR notation IP range from which to assign service cluster IPs.
+        This must not overlap with any IP ranges assigned to nodes for pods.
+      '';
+      default = "10.0.0.0/24";
+      type = str;
+    };
+
+    tlsCertFile = mkOption {
+      description = "Kubernetes apiserver certificate file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = "Kubernetes apiserver private key file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tokenAuthFile = mkOption {
+      description = ''
+        Kubernetes apiserver token authentication file. See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+    webhookConfig = mkOption {
+      description = ''
+        Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
+        See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+  };
+
+
+  ###### implementation
+  config = mkMerge [
+
+    (let
+
+      apiserverPaths = filter (a: a != null) [
+        cfg.clientCaFile
+        cfg.etcd.caFile
+        cfg.etcd.certFile
+        cfg.etcd.keyFile
+        cfg.kubeletClientCaFile
+        cfg.kubeletClientCertFile
+        cfg.kubeletClientKeyFile
+        cfg.serviceAccountKeyFile
+        cfg.tlsCertFile
+        cfg.tlsKeyFile
+      ];
+      etcdPaths = filter (a: a != null) [
+        config.services.etcd.trustedCaFile
+        config.services.etcd.certFile
+        config.services.etcd.keyFile
+      ];
+
+    in mkIf cfg.enable {
+        systemd.services.kube-apiserver = {
+          description = "Kubernetes APIServer Service";
+          wantedBy = [ "kube-control-plane-online.target" ];
+          after = [ "certmgr.service" ];
+          before = [ "kube-control-plane-online.target" ];
+          serviceConfig = {
+            Slice = "kubernetes.slice";
+            ExecStart = ''${top.package}/bin/kube-apiserver \
+              --allow-privileged=${boolToString cfg.allowPrivileged} \
+              --authorization-mode=${concatStringsSep "," cfg.authorizationMode} \
+                ${optionalString (elem "ABAC" cfg.authorizationMode)
+                  "--authorization-policy-file=${
+                    pkgs.writeText "kube-auth-policy.jsonl"
+                    (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.authorizationPolicy)
+                  }"
+                } \
+                ${optionalString (elem "Webhook" cfg.authorizationMode)
+                  "--authorization-webhook-config-file=${cfg.webhookConfig}"
+                } \
+              --bind-address=${cfg.bindAddress} \
+              ${optionalString (cfg.advertiseAddress != null)
+                "--advertise-address=${cfg.advertiseAddress}"} \
+              ${optionalString (cfg.clientCaFile != null)
+                "--client-ca-file=${cfg.clientCaFile}"} \
+              --disable-admission-plugins=${concatStringsSep "," cfg.disableAdmissionPlugins} \
+              --enable-admission-plugins=${concatStringsSep "," cfg.enableAdmissionPlugins} \
+              --etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
+              ${optionalString (cfg.etcd.caFile != null)
+                "--etcd-cafile=${cfg.etcd.caFile}"} \
+              ${optionalString (cfg.etcd.certFile != null)
+                "--etcd-certfile=${cfg.etcd.certFile}"} \
+              ${optionalString (cfg.etcd.keyFile != null)
+                "--etcd-keyfile=${cfg.etcd.keyFile}"} \
+              ${optionalString (cfg.featureGates != [])
+                "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+              ${optionalString (cfg.basicAuthFile != null)
+                "--basic-auth-file=${cfg.basicAuthFile}"} \
+              --kubelet-https=${boolToString cfg.kubeletHttps} \
+              ${optionalString (cfg.kubeletClientCaFile != null)
+                "--kubelet-certificate-authority=${cfg.kubeletClientCaFile}"} \
+              ${optionalString (cfg.kubeletClientCertFile != null)
+                "--kubelet-client-certificate=${cfg.kubeletClientCertFile}"} \
+              ${optionalString (cfg.kubeletClientKeyFile != null)
+                "--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \
+              ${optionalString (cfg.proxyClientCertFile != null)
+                "--proxy-client-cert-file=${cfg.proxyClientCertFile}"} \
+              ${optionalString (cfg.proxyClientKeyFile != null)
+                "--proxy-client-key-file=${cfg.proxyClientKeyFile}"} \
+              --insecure-bind-address=${cfg.insecureBindAddress} \
+              --insecure-port=${toString cfg.insecurePort} \
+              ${optionalString (cfg.runtimeConfig != "")
+                "--runtime-config=${cfg.runtimeConfig}"} \
+              --secure-port=${toString cfg.securePort} \
+              ${optionalString (cfg.serviceAccountKeyFile!=null)
+                "--service-account-key-file=${cfg.serviceAccountKeyFile}"} \
+              --service-cluster-ip-range=${cfg.serviceClusterIpRange} \
+              --storage-backend=${cfg.storageBackend} \
+              ${optionalString (cfg.tlsCertFile != null)
+                "--tls-cert-file=${cfg.tlsCertFile}"} \
+              ${optionalString (cfg.tlsKeyFile != null)
+                "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+              ${optionalString (cfg.tokenAuthFile != null)
+                "--token-auth-file=${cfg.tokenAuthFile}"} \
+              ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+              ${cfg.extraOpts}
+            '';
+            WorkingDirectory = top.dataDir;
+            User = "kubernetes";
+            Group = "kubernetes";
+            AmbientCapabilities = "cap_net_bind_service";
+            Restart = "on-failure";
+            RestartSec = 5;
+          };
+          unitConfig.ConditionPathExists = apiserverPaths;
+        };
+
+        systemd.paths.kube-apiserver = mkIf top.apiserver.enable {
+          wantedBy = [ "kube-apiserver.service" ];
+          pathConfig = {
+            PathExists = apiserverPaths;
+            PathChanged = apiserverPaths;
+          };
+        };
+
+        services.etcd = {
+          clientCertAuth = mkDefault true;
+          peerClientCertAuth = mkDefault true;
+          listenClientUrls = mkDefault ["https://0.0.0.0:2379"];
+          listenPeerUrls = mkDefault ["https://0.0.0.0:2380"];
+          advertiseClientUrls = mkDefault ["https://${top.masterAddress}:2379"];
+          initialCluster = mkDefault ["${top.masterAddress}=https://${top.masterAddress}:2380"];
+          name = mkDefault top.masterAddress;
+          initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
+        };
+
+        systemd.services.etcd = {
+          unitConfig.ConditionPathExists = etcdPaths;
+        };
+
+        systemd.paths.etcd = {
+          wantedBy = [ "etcd.service" ];
+          pathConfig = {
+            PathExists = etcdPaths;
+            PathChanged = etcdPaths;
+          };
+        };
+
+        services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
+
+          apiserver-kubelet-api-admin-crb = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "ClusterRoleBinding";
+            metadata = {
+              name = "system:kube-apiserver:kubelet-api-admin";
+            };
+            roleRef = {
+              apiGroup = "rbac.authorization.k8s.io";
+              kind = "ClusterRole";
+              name = "system:kubelet-api-admin";
+            };
+            subjects = [{
+              kind = "User";
+              name = "system:kube-apiserver";
+            }];
+          };
+
+        };
+
+      services.kubernetes.pki.certs = with top.lib; {
+        apiServer = mkCert {
+          name = "kube-apiserver";
+          CN = "kubernetes";
+          hosts = [
+                    "kubernetes.default.svc"
+                    "kubernetes.default.svc.${top.addons.dns.clusterDomain}"
+                    cfg.advertiseAddress
+                    top.masterAddress
+                    apiserverServiceIP
+                    "127.0.0.1"
+                  ] ++ cfg.extraSANs;
+          action = "systemctl restart kube-apiserver.service";
+        };
+        apiserverProxyClient = mkCert {
+          name = "kube-apiserver-proxy-client";
+          CN = "front-proxy-client";
+          action = "systemctl restart kube-apiserver.service";
+        };
+        apiserverKubeletClient = mkCert {
+          name = "kube-apiserver-kubelet-client";
+          CN = "system:kube-apiserver";
+          action = "systemctl restart kube-apiserver.service";
+        };
+        apiserverEtcdClient = mkCert {
+          name = "kube-apiserver-etcd-client";
+          CN = "etcd-client";
+          action = "systemctl restart kube-apiserver.service";
+        };
+        clusterAdmin = mkCert {
+          name = "cluster-admin";
+          CN = "cluster-admin";
+          fields = {
+            O = "system:masters";
+          };
+          privateKeyOwner = "root";
+        };
+        etcd = mkCert {
+          name = "etcd";
+          CN = top.masterAddress;
+          hosts = [
+                    "etcd.local"
+                    "etcd.${top.addons.dns.clusterDomain}"
+                    top.masterAddress
+                    cfg.advertiseAddress
+                  ];
+          privateKeyOwner = "etcd";
+          action = "systemctl restart etcd.service";
+        };
+      };
+
+    })
+
+  ];
+
+}
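A sketch of a master-node configuration exercising these options; the address
and SAN are placeholders, and the easyCerts remark is an assumption about the
pki module added elsewhere in this diff:

    services.kubernetes.apiserver = {
      enable = true;
      advertiseAddress = "192.168.1.10";      # placeholder node address
      serviceClusterIpRange = "10.0.0.0/24";  # the module default
      extraSANs = [ "kubernetes.example.com" ];
    };
    # With services.kubernetes.easyCerts = true, the pki module provisions the
    # certificates declared above via services.kubernetes.pki.certs.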
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/controller-manager.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/controller-manager.nix
new file mode 100644
index 000000000000..b94e8bd86d4c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/controller-manager.nix
@@ -0,0 +1,191 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.controllerManager;
+in
+{
+  ###### interface
+  options.services.kubernetes.controllerManager = with lib.types; {
+
+    allocateNodeCIDRs = mkOption {
+      description = "Whether to automatically allocate CIDR ranges for cluster nodes.";
+      default = true;
+      type = bool;
+    };
+
+    bindAddress = mkOption {
+      description = "Kubernetes controller manager listening address.";
+      default = "127.0.0.1";
+      type = str;
+    };
+
+    clusterCidr = mkOption {
+      description = "Kubernetes CIDR Range for Pods in cluster.";
+      default = top.clusterCidr;
+      type = nullOr str;
+    };
+
+    enable = mkEnableOption "Kubernetes controller manager";
+
+    extraOpts = mkOption {
+      description = "Kubernetes controller manager extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    insecurePort = mkOption {
+      description = "Kubernetes controller manager insecure listening port.";
+      default = 0;
+      type = int;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes controller manager";
+
+    leaderElect = mkOption {
+      description = "Whether to start leader election before executing main loop.";
+      type = bool;
+      default = true;
+    };
+
+    rootCaFile = mkOption {
+      description = ''
+        Kubernetes controller manager certificate authority file included in
+        service account's token secret.
+      '';
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    securePort = mkOption {
+      description = "Kubernetes controller manager secure listening port.";
+      default = 10252;
+      type = int;
+    };
+
+    serviceAccountKeyFile = mkOption {
+      description = ''
+        Kubernetes controller manager PEM-encoded private RSA key file used to
+        sign service account tokens.
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsCertFile = mkOption {
+      description = "Kubernetes controller-manager certificate file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = "Kubernetes controller-manager private key file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = let
+
+    controllerManagerPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+      cfg.rootCaFile
+      cfg.serviceAccountKeyFile
+      cfg.tlsCertFile
+      cfg.tlsKeyFile
+    ];
+
+  in mkIf cfg.enable {
+    systemd.services.kube-controller-manager = rec {
+      description = "Kubernetes Controller Manager Service";
+      wantedBy = [ "kube-control-plane-online.target" ];
+      after = [ "kube-apiserver.service" ];
+      before = [ "kube-control-plane-online.target" ];
+      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig;
+      preStart = ''
+        until kubectl auth can-i get /api -q 2>/dev/null; do
+          echo kubectl auth can-i get /api: exit status $?
+          sleep 2
+        done
+      '';
+      serviceConfig = {
+        RestartSec = "30s";
+        Restart = "on-failure";
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-controller-manager \
+          --allocate-node-cidrs=${boolToString cfg.allocateNodeCIDRs} \
+          --bind-address=${cfg.bindAddress} \
+          ${optionalString (cfg.clusterCidr!=null)
+            "--cluster-cidr=${cfg.clusterCidr}"} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --kubeconfig=${environment.KUBECONFIG} \
+          --leader-elect=${boolToString cfg.leaderElect} \
+          ${optionalString (cfg.rootCaFile!=null)
+            "--root-ca-file=${cfg.rootCaFile}"} \
+          --port=${toString cfg.insecurePort} \
+          --secure-port=${toString cfg.securePort} \
+          ${optionalString (cfg.serviceAccountKeyFile!=null)
+            "--service-account-private-key-file=${cfg.serviceAccountKeyFile}"} \
+          ${optionalString (cfg.tlsCertFile!=null)
+            "--tls-cert-file=${cfg.tlsCertFile}"} \
+          ${optionalString (cfg.tlsKeyFile!=null)
+            "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+          ${optionalString (elem "RBAC" top.apiserver.authorizationMode)
+            "--use-service-account-credentials"} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+      };
+      path = top.path ++ [ pkgs.kubectl ];
+      unitConfig.ConditionPathExists = controllerManagerPaths;
+    };
+
+    systemd.paths.kube-controller-manager = {
+      wantedBy = [ "kube-controller-manager.service" ];
+      pathConfig = {
+        PathExists = controllerManagerPaths;
+        PathChanged = controllerManagerPaths;
+      };
+    };
+
+    services.kubernetes.pki.certs = with top.lib; {
+      controllerManager = mkCert {
+        name = "kube-controller-manager";
+        CN = "kube-controller-manager";
+        action = "systemctl restart kube-controller-manager.service";
+      };
+      controllerManagerClient = mkCert {
+        name = "kube-controller-manager-client";
+        CN = "system:kube-controller-manager";
+        action = "systemctl restart kube-controller-manager.service";
+      };
+    };
+
+    services.kubernetes.controllerManager.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+}
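A usage sketch; the key path is a placeholder (with easyCerts the pki module
can provision keys under services.kubernetes.secretsPath), and the kubeconfig
server defaults to services.kubernetes.apiserverAddress as set above:

    services.kubernetes.controllerManager = {
      enable = true;
      leaderElect = true;  # the default
      serviceAccountKeyFile = "/var/lib/kubernetes/secrets/service-account-key.pem";
    };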
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/default.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/default.nix
new file mode 100644
index 000000000000..192c893f8a16
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/default.nix
@@ -0,0 +1,317 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.kubernetes;
+
+  mkKubeConfig = name: conf: pkgs.writeText "${name}-kubeconfig" (builtins.toJSON {
+    apiVersion = "v1";
+    kind = "Config";
+    clusters = [{
+      name = "local";
+      cluster.certificate-authority = conf.caFile or cfg.caFile;
+      cluster.server = conf.server;
+    }];
+    users = [{
+      inherit name;
+      user = {
+        client-certificate = conf.certFile;
+        client-key = conf.keyFile;
+      };
+    }];
+    contexts = [{
+      name = "local";
+      context = {
+        cluster = "local";
+        user = name;
+      };
+    }];
+    current-context = "local";
+  });
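+
+  # For illustration, the kubeconfig JSON produced above has this shape
+  # (paths and the server URL are whatever the caller passes in):
+  #
+  #   { "apiVersion": "v1", "kind": "Config",
+  #     "clusters": [ { "name": "local", "cluster": { "certificate-authority": ..., "server": ... } } ],
+  #     "users":    [ { "name": <name>, "user": { "client-certificate": ..., "client-key": ... } } ],
+  #     "contexts": [ { "name": "local", "context": { "cluster": "local", "user": <name> } } ],
+  #     "current-context": "local" }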
+
+  caCert = secret "ca";
+
+  etcdEndpoints = ["https://${cfg.masterAddress}:2379"];
+
+  mkCert = { name, CN, hosts ? [], fields ? {}, action ? "",
+             privateKeyOwner ? "kubernetes" }: rec {
+    inherit name caCert CN hosts fields action;
+    cert = secret name;
+    key = secret "${name}-key";
+    privateKeyOptions = {
+      owner = privateKeyOwner;
+      group = "nogroup";
+      mode = "0600";
+      path = key;
+    };
+  };
+
+  secret = name: "${cfg.secretsPath}/${name}.pem";
+
+  mkKubeConfigOptions = prefix: {
+    server = mkOption {
+      description = "${prefix} kube-apiserver server address.";
+      type = types.str;
+    };
+
+    caFile = mkOption {
+      description = "${prefix} certificate authority file used to connect to kube-apiserver.";
+      type = types.nullOr types.path;
+      default = cfg.caFile;
+    };
+
+    certFile = mkOption {
+      description = "${prefix} client certificate file used to connect to kube-apiserver.";
+      type = types.nullOr types.path;
+      default = null;
+    };
+
+    keyFile = mkOption {
+      description = "${prefix} client key file used to connect to kube-apiserver.";
+      type = types.nullOr types.path;
+      default = null;
+    };
+  };
+
+  kubeConfigDefaults = {
+    server = mkDefault cfg.kubeconfig.server;
+    caFile = mkDefault cfg.kubeconfig.caFile;
+    certFile = mkDefault cfg.kubeconfig.certFile;
+    keyFile = mkDefault cfg.kubeconfig.keyFile;
+  };
+in {
+
+  ###### interface
+
+  options.services.kubernetes = {
+    roles = mkOption {
+      description = ''
+        Kubernetes roles that this machine should take.
+
+        Master role will enable etcd, apiserver, scheduler, controller manager,
+        addon manager, flannel and proxy services.
+        Node role will enable flannel, docker, kubelet and proxy services.
+      '';
+      default = [];
+      type = types.listOf (types.enum ["master" "node"]);
+    };
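+
+    # Illustrative usage for a single-machine cluster (hostname is hypothetical):
+    #   services.kubernetes = {
+    #     roles = [ "master" "node" ];
+    #     masterAddress = "kube.example.local";
+    #   };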
+
+    package = mkOption {
+      description = "Kubernetes package to use.";
+      type = types.package;
+      default = pkgs.kubernetes;
+      defaultText = "pkgs.kubernetes";
+    };
+
+    kubeconfig = mkKubeConfigOptions "Default kubeconfig";
+
+    apiserverAddress = mkOption {
+      description = ''
+        Clusterwide accessible address for the kubernetes apiserver,
+        including protocol and optional port.
+      '';
+      example = "https://kubernetes-apiserver.example.com:6443";
+      type = types.str;
+    };
+
+    caFile = mkOption {
+      description = "Default kubernetes certificate authority";
+      type = types.nullOr types.path;
+      default = null;
+    };
+
+    dataDir = mkOption {
+      description = "Kubernetes root directory for managing kubelet files.";
+      default = "/var/lib/kubernetes";
+      type = types.path;
+    };
+
+    easyCerts = mkOption {
+      description = "Automatically setup x509 certificates and keys for the entire cluster.";
+      default = false;
+      type = types.bool;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates.";
+      default = [];
+      type = types.listOf types.str;
+    };
+
+    masterAddress = mkOption {
+      description = "Clusterwide available network address or hostname for the kubernetes master server.";
+      example = "master.example.com";
+      type = types.str;
+    };
+
+    path = mkOption {
+      description = "Packages added to the services' PATH environment variable. Both the bin and sbin subdirectories of each package are added.";
+      type = types.listOf types.package;
+      default = [];
+    };
+
+    clusterCidr = mkOption {
+      description = "Kubernetes controller manager and proxy CIDR Range for Pods in cluster.";
+      default = "10.1.0.0/16";
+      type = types.nullOr types.str;
+    };
+
+    lib = mkOption {
+      description = "Common functions for the kubernetes modules.";
+      default = {
+        inherit mkCert;
+        inherit mkKubeConfig;
+        inherit mkKubeConfigOptions;
+      };
+      type = types.attrs;
+    };
+
+    secretsPath = mkOption {
+      description = "Default location for kubernetes secrets. Not a store location.";
+      type = types.path;
+      default = cfg.dataDir + "/secrets";
+    };
+  };
+
+  ###### implementation
+
+  config = mkMerge [
+
+    (mkIf cfg.easyCerts {
+      services.kubernetes.pki.enable = mkDefault true;
+      services.kubernetes.caFile = caCert;
+    })
+
+    (mkIf (elem "master" cfg.roles) {
+      services.kubernetes.apiserver.enable = mkDefault true;
+      services.kubernetes.scheduler.enable = mkDefault true;
+      services.kubernetes.controllerManager.enable = mkDefault true;
+      services.kubernetes.addonManager.enable = mkDefault true;
+      services.kubernetes.proxy.enable = mkDefault true;
+      services.etcd.enable = true; # Cannot mkDefault because of flannel default options
+      services.kubernetes.kubelet = {
+        enable = mkDefault true;
+        taints = mkIf (!(elem "node" cfg.roles)) {
+          master = {
+            key = "node-role.kubernetes.io/master";
+            value = "true";
+            effect = "NoSchedule";
+          };
+        };
+      };
+    })
+
+    (mkIf (all (el: el == "master") cfg.roles) {
+      # if this node is only a master, make it unschedulable by default
+      services.kubernetes.kubelet.unschedulable = mkDefault true;
+    })
+
+    (mkIf (elem "node" cfg.roles) {
+      services.kubernetes.kubelet.enable = mkDefault true;
+      services.kubernetes.proxy.enable = mkDefault true;
+    })
+
+    # Using "services.kubernetes.roles" will automatically enable easyCerts and flannel
+    (mkIf (cfg.roles != []) {
+      services.kubernetes.flannel.enable = mkDefault true;
+      services.flannel.etcd.endpoints = mkDefault etcdEndpoints;
+      services.kubernetes.easyCerts = mkDefault true;
+    })
+
+    (mkIf cfg.apiserver.enable {
+      services.kubernetes.pki.etcClusterAdminKubeconfig = mkDefault "kubernetes/cluster-admin.kubeconfig";
+      services.kubernetes.apiserver.etcd.servers = mkDefault etcdEndpoints;
+    })
+
+    (mkIf cfg.kubelet.enable {
+      virtualisation.docker = {
+        enable = mkDefault true;
+
+        # kubernetes needs access to logs
+        logDriver = mkDefault "json-file";
+
+        # iptables must be disabled for kubernetes
+        extraOptions = "--iptables=false --ip-masq=false";
+      };
+    })
+
+    (mkIf (cfg.apiserver.enable || cfg.controllerManager.enable) {
+      services.kubernetes.pki.certs = {
+        serviceAccount = mkCert {
+          name = "service-account";
+          CN = "system:service-account-signer";
+          action = ''
+            systemctl reload \
+              kube-apiserver.service \
+              kube-controller-manager.service
+          '';
+        };
+      };
+    })
+
+    (mkIf (
+        cfg.apiserver.enable ||
+        cfg.scheduler.enable ||
+        cfg.controllerManager.enable ||
+        cfg.kubelet.enable ||
+        cfg.proxy.enable ||
+        cfg.addonManager.enable
+    ) {
+      systemd.targets.kubernetes = {
+        description = "Kubernetes";
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      systemd.targets.kube-control-plane-online = {
+        wantedBy = [ "kubernetes.target" ];
+        before = [ "kubernetes.target" ];
+      };
+
+      systemd.services.kube-control-plane-online = rec {
+        description = "Kubernetes control plane is online";
+        wantedBy = [ "kube-control-plane-online.target" ];
+        after = [ "kube-scheduler.service" "kube-controller-manager.service" ];
+        before = [ "kube-control-plane-online.target" ];
+        environment.KUBECONFIG = cfg.lib.mkKubeConfig "default" cfg.kubeconfig;
+        path = [ pkgs.kubectl ];
+        preStart = ''
+          until kubectl get --raw=/healthz 2>/dev/null; do
+            echo kubectl get --raw=/healthz: exit status $?
+            sleep 3
+          done
+        '';
+        script = "echo Ok";
+        serviceConfig = {
+          TimeoutSec = "500";
+        };
+      };
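+
+      # Illustrative: this unit blocks until `kubectl get --raw=/healthz` succeeds,
+      # so units ordered after kube-control-plane-online.target (e.g. the kubelet)
+      # only start once the apiserver actually answers.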
+
+      systemd.tmpfiles.rules = [
+        "d /opt/cni/bin 0755 root root -"
+        "d /run/kubernetes 0755 kubernetes kubernetes -"
+        "d /var/lib/kubernetes 0755 kubernetes kubernetes -"
+      ];
+
+      users.users = singleton {
+        name = "kubernetes";
+        uid = config.ids.uids.kubernetes;
+        description = "Kubernetes user";
+        extraGroups = [ "docker" ];
+        group = "kubernetes";
+        home = cfg.dataDir;
+        createHome = true;
+      };
+      users.groups.kubernetes.gid = config.ids.gids.kubernetes;
+
+      # dns addon is enabled by default
+      services.kubernetes.addons.dns.enable = mkDefault true;
+
+      services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null
+                          then cfg.apiserver.advertiseAddress
+                          else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}");
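+
+      # Illustrative: with masterAddress = "master.example.com" and the apiserver's
+      # default securePort of 6443, this yields "https://master.example.com:6443";
+      # if apiserver.advertiseAddress is set, it is used as-is (without an explicit port).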
+
+      services.kubernetes.kubeconfig.server = mkDefault cfg.apiserverAddress;
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/flannel.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/flannel.nix
new file mode 100644
index 000000000000..d9437427d6d1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/flannel.nix
@@ -0,0 +1,184 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.flannel;
+
+  # we want flannel to use kubernetes itself as configuration backend, not direct etcd
+  storageBackend = "kubernetes";
+
+  # needed for flannel to pass options to docker
+  mkDockerOpts = pkgs.runCommand "mk-docker-opts" {
+    buildInputs = [ pkgs.makeWrapper ];
+  } ''
+    mkdir -p $out
+    cp ${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh $out/mk-docker-opts.sh
+
+    # bashInteractive needed for `compgen`
+    makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "$out/mk-docker-opts.sh"
+  '';
+in
+{
+  ###### interface
+  options.services.kubernetes.flannel = {
+    enable = mkEnableOption "flannel networking";
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes flannel";
+  };
+
+  ###### implementation
+  config = let
+
+    flannelPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+    ];
+    kubeconfig = top.lib.mkKubeConfig "flannel" cfg.kubeconfig;
+
+  in mkIf cfg.enable {
+    services.flannel = {
+
+      enable = mkDefault true;
+      network = mkDefault top.clusterCidr;
+      inherit storageBackend kubeconfig;
+      nodeName = top.kubelet.hostname;
+    };
+
+    services.kubernetes.kubelet = {
+      networkPlugin = mkDefault "cni";
+      cni.config = mkDefault [{
+        name = "mynet";
+        type = "flannel";
+        delegate = {
+          isDefaultGateway = true;
+          bridge = "docker0";
+        };
+      }];
+    };
+
+    systemd.services.mk-docker-opts = {
+      description = "Pre-Docker Actions";
+      wantedBy = [ "flannel.target" ];
+      before = [ "flannel.target" ];
+      path = with pkgs; [ gawk gnugrep ];
+      script = ''
+        ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
+        systemctl restart docker
+      '';
+      unitConfig.ConditionPathExists = [ "/run/flannel/subnet.env" ];
+      serviceConfig.Type = "oneshot";
+    };
+
+    systemd.paths.flannel-subnet-env = {
+      wantedBy = [ "mk-docker-opts.service" ];
+      pathConfig = {
+        PathExists = [ "/run/flannel/subnet.env" ];
+        PathChanged = [ "/run/flannel/subnet.env" ];
+        Unit = "mk-docker-opts.service";
+      };
+    };
+
+    systemd.targets.flannel = {
+      wantedBy = [ "kube-node-online.target" ];
+      before = [ "kube-node-online.target" ];
+    };
+
+    systemd.services.flannel = {
+      wantedBy = [ "flannel.target" ];
+      after = [ "kubelet.target" ];
+      before = [ "flannel.target" ];
+      path = with pkgs; [ iptables kubectl ];
+      environment.KUBECONFIG = kubeconfig;
+      preStart = let
+        args = [
+          "--selector=kubernetes.io/hostname=${top.kubelet.hostname}"
+          # flannel exits if node is not registered yet, before that there is no podCIDR
+          "--output=jsonpath={.items[0].spec.podCIDR}"
+          # if jsonpath cannot be resolved exit with status 1
+          "--allow-missing-template-keys=false"
+        ];
+      in ''
+        until kubectl get nodes ${concatStringsSep " " args} 2>/dev/null; do
+          echo Waiting for ${top.kubelet.hostname} to be RegisteredNode
+          sleep 1
+        done
+      '';
+      unitConfig.ConditionPathExists = flannelPaths;
+    };
+
+    systemd.paths.flannel = {
+      wantedBy = [ "flannel.service" ];
+      pathConfig = {
+        PathExists = flannelPaths;
+        PathChanged = flannelPaths;
+      };
+    };
+
+    services.kubernetes.flannel.kubeconfig.server = mkDefault top.apiserverAddress;
+
+    systemd.services.docker = {
+      environment.DOCKER_OPTS = "-b none";
+      serviceConfig.EnvironmentFile = "-/run/flannel/docker";
+    };
+
+    # read environment variables generated by mk-docker-opts
+    virtualisation.docker.extraOptions = "$DOCKER_OPTS";
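+
+    # Illustrative flow: mk-docker-opts renders /run/flannel/subnet.env into docker
+    # options in /run/flannel/docker; docker sources that file via EnvironmentFile
+    # above, and the $DOCKER_OPTS placeholder expands to the flannel-assigned
+    # bridge/subnet settings.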
+
+    networking = {
+      firewall.allowedUDPPorts = [
+        8285  # flannel udp
+        8472  # flannel vxlan
+      ];
+      dhcpcd.denyInterfaces = [ "docker*" "flannel*" ];
+    };
+
+    services.kubernetes.pki.certs = {
+      flannelClient = top.lib.mkCert {
+        name = "flannel-client";
+        CN = "flannel-client";
+        action = "systemctl restart flannel.service";
+      };
+    };
+
+    # give flannel some kubernetes rbac permissions if applicable
+    services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
+      flannel-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRole";
+        metadata = { name = "flannel"; };
+        rules = [{
+          apiGroups = [ "" ];
+          resources = [ "pods" ];
+          verbs = [ "get" ];
+        }
+        {
+          apiGroups = [ "" ];
+          resources = [ "nodes" ];
+          verbs = [ "list" "watch" ];
+        }
+        {
+          apiGroups = [ "" ];
+          resources = [ "nodes/status" ];
+          verbs = [ "patch" ];
+        }];
+      };
+
+      flannel-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRoleBinding";
+        metadata = { name = "flannel"; };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "flannel";
+        };
+        subjects = [{
+          kind = "User";
+          name = "flannel-client";
+        }];
+      };
+    };
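+
+    # Illustrative: the ClusterRoleBinding grants the "flannel" ClusterRole to the
+    # User "flannel-client", which matches the CN of the flannel-client certificate
+    # requested above, so flannel's client cert carries exactly these permissions.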
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix
new file mode 100644
index 000000000000..2a4a0624555d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix
@@ -0,0 +1,426 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.kubelet;
+
+  cniConfig =
+    if cfg.cni.config != [] && !(isNull cfg.cni.configDir) then
+      throw "Verbatim CNI-config and CNI configDir cannot both be set."
+    else if !(isNull cfg.cni.configDir) then
+      cfg.cni.configDir
+    else
+      (pkgs.buildEnv {
+        name = "kubernetes-cni-config";
+        paths = imap (i: entry:
+          pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
+        ) cfg.cni.config;
+      });
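+
+  # Illustrative: each cfg.cni.config entry is serialized to JSON and named by
+  # position and type, e.g. a single flannel entry becomes 11-flannel.conf in the
+  # directory passed to kubelet via --cni-conf-dir.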
+
+  infraContainer = pkgs.dockerTools.buildImage {
+    name = "pause";
+    tag = "latest";
+    contents = top.package.pause;
+    config.Cmd = "/bin/pause";
+  };
+
+  kubeconfig = top.lib.mkKubeConfig "kubelet" cfg.kubeconfig;
+
+  manifests = pkgs.buildEnv {
+    name = "kubernetes-manifests";
+    paths = mapAttrsToList (name: manifest:
+      pkgs.writeTextDir "${name}.json" (builtins.toJSON manifest)
+    ) cfg.manifests;
+  };
+
+  manifestPath = "kubernetes/manifests";
+
+  taintOptions = with lib.types; { name, ... }: {
+    options = {
+      key = mkOption {
+        description = "Key of taint.";
+        default = name;
+        type = str;
+      };
+      value = mkOption {
+        description = "Value of taint.";
+        type = str;
+      };
+      effect = mkOption {
+        description = "Effect of taint.";
+        example = "NoSchedule";
+        type = enum ["NoSchedule" "PreferNoSchedule" "NoExecute"];
+      };
+    };
+  };
+
+  taints = concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (attrValues cfg.taints);
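+
+  # Illustrative: cfg.taints = { gpu = { key = "gpu"; value = "true"; effect = "NoSchedule"; }; }
+  # renders as "gpu=true:NoSchedule" (comma-separated when there are several taints)
+  # and is passed to the kubelet as --register-with-taints.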
+in
+{
+  ###### interface
+  options.services.kubernetes.kubelet = with lib.types; {
+
+    address = mkOption {
+      description = "Kubernetes kubelet info server listening address.";
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    allowPrivileged = mkOption {
+      description = "Whether to allow Kubernetes containers to request privileged mode.";
+      default = false;
+      type = bool;
+    };
+
+    clusterDns = mkOption {
+      description = "Use alternative DNS.";
+      default = "10.1.0.1";
+      type = str;
+    };
+
+    clusterDomain = mkOption {
+      description = "Use alternative domain.";
+      default = config.services.kubernetes.addons.dns.clusterDomain;
+      type = str;
+    };
+
+    clientCaFile = mkOption {
+      description = "Kubernetes apiserver CA file for client authentication.";
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    cni = {
+      packages = mkOption {
+        description = "List of network plugin packages to install.";
+        type = listOf package;
+        default = [];
+      };
+
+      config = mkOption {
+        description = "Kubernetes CNI configuration.";
+        type = listOf attrs;
+        default = [];
+        example = literalExample ''
+          [{
+            cniVersion = "0.2.0";
+            name = "mynet";
+            type = "bridge";
+            bridge = "cni0";
+            isGateway = true;
+            ipMasq = true;
+            ipam = {
+              type = "host-local";
+              subnet = "10.22.0.0/16";
+              routes = [
+                { dst = "0.0.0.0/0"; }
+              ];
+            };
+          } {
+            cniVersion = "0.2.0";
+            type = "loopback";
+          }]
+        '';
+      };
+
+      configDir = mkOption {
+        description = "Path to Kubernetes CNI configuration directory.";
+        type = nullOr path;
+        default = null;
+      };
+    };
+
+    enable = mkEnableOption "Kubernetes kubelet";
+
+    extraOpts = mkOption {
+      description = "Kubernetes kubelet extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    healthz = {
+      bind = mkOption {
+        description = "Kubernetes kubelet healthz listening address.";
+        default = "127.0.0.1";
+        type = str;
+      };
+
+      port = mkOption {
+        description = "Kubernetes kubelet healthz port.";
+        default = 10248;
+        type = int;
+      };
+    };
+
+    hostname = mkOption {
+      description = "Kubernetes kubelet hostname override.";
+      default = config.networking.hostName;
+      type = str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubelet";
+
+    manifests = mkOption {
+      description = "List of manifests to bootstrap with kubelet (only pods can be created as manifest entry)";
+      type = attrsOf attrs;
+      default = {};
+    };
+
+    networkPlugin = mkOption {
+      description = "Network plugin to use by Kubernetes.";
+      type = nullOr (enum ["cni" "kubenet"]);
+      default = "kubenet";
+    };
+
+    nodeIp = mkOption {
+      description = "IP address of the node. If set, kubelet will use this IP address for the node.";
+      default = null;
+      type = nullOr str;
+    };
+
+    registerNode = mkOption {
+      description = "Whether to auto register kubelet with API server.";
+      default = true;
+      type = bool;
+    };
+
+    port = mkOption {
+      description = "Kubernetes kubelet info server listening port.";
+      default = 10250;
+      type = int;
+    };
+
+    seedDockerImages = mkOption {
+      description = "List of docker images to preload on system";
+      default = [];
+      type = listOf package;
+    };
+
+    taints = mkOption {
+      description = "Node taints (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).";
+      default = {};
+      type = attrsOf (submodule [ taintOptions ]);
+    };
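+
+    # Illustrative usage (this is what the module itself does for unschedulable nodes):
+    #   services.kubernetes.kubelet.taints.unschedulable = {
+    #     value = "true";
+    #     effect = "NoSchedule";
+    #   };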
+
+    tlsCertFile = mkOption {
+      description = "File containing x509 Certificate for HTTPS.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = "File containing x509 private key matching tlsCertFile.";
+      default = null;
+      type = nullOr path;
+    };
+
+    unschedulable = mkOption {
+      description = "Whether to set node taint to unschedulable=true as it is the case of node that has only master role.";
+      default = false;
+      type = bool;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkMerge [
+    (let
+
+      kubeletPaths = filter (a: a != null) [
+        cfg.kubeconfig.caFile
+        cfg.kubeconfig.certFile
+        cfg.kubeconfig.keyFile
+        cfg.clientCaFile
+        cfg.tlsCertFile
+        cfg.tlsKeyFile
+      ];
+
+    in mkIf cfg.enable {
+      services.kubernetes.kubelet.seedDockerImages = [infraContainer];
+
+      systemd.services.kubelet = {
+        description = "Kubernetes Kubelet Service";
+        wantedBy = [ "kubelet.target" ];
+        after = [ "kube-control-plane-online.target" ];
+        before = [ "kubelet.target" ];
+        path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
+        preStart = ''
+          rm -f /opt/cni/bin/* || true
+          ${concatMapStrings (package: ''
+            echo "Linking cni package: ${package}"
+            ln -fs ${package}/bin/* /opt/cni/bin
+          '') cfg.cni.packages}
+        '';
+        serviceConfig = {
+          Slice = "kubernetes.slice";
+          CPUAccounting = true;
+          MemoryAccounting = true;
+          Restart = "on-failure";
+          RestartSec = "1000ms";
+          ExecStart = ''${top.package}/bin/kubelet \
+            --address=${cfg.address} \
+            --allow-privileged=${boolToString cfg.allowPrivileged} \
+            --authentication-token-webhook \
+            --authentication-token-webhook-cache-ttl="10s" \
+            --authorization-mode=Webhook \
+            ${optionalString (cfg.clientCaFile != null)
+              "--client-ca-file=${cfg.clientCaFile}"} \
+            ${optionalString (cfg.clusterDns != "")
+              "--cluster-dns=${cfg.clusterDns}"} \
+            ${optionalString (cfg.clusterDomain != "")
+              "--cluster-domain=${cfg.clusterDomain}"} \
+            --cni-conf-dir=${cniConfig} \
+            ${optionalString (cfg.featureGates != [])
+              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+            --hairpin-mode=hairpin-veth \
+            --healthz-bind-address=${cfg.healthz.bind} \
+            --healthz-port=${toString cfg.healthz.port} \
+            --hostname-override=${cfg.hostname} \
+            --kubeconfig=${kubeconfig} \
+            ${optionalString (cfg.networkPlugin != null)
+              "--network-plugin=${cfg.networkPlugin}"} \
+            ${optionalString (cfg.nodeIp != null)
+              "--node-ip=${cfg.nodeIp}"} \
+            --pod-infra-container-image=pause \
+            ${optionalString (cfg.manifests != {})
+              "--pod-manifest-path=/etc/${manifestPath}"} \
+            --port=${toString cfg.port} \
+            --register-node=${boolToString cfg.registerNode} \
+            ${optionalString (taints != "")
+              "--register-with-taints=${taints}"} \
+            --root-dir=${top.dataDir} \
+            ${optionalString (cfg.tlsCertFile != null)
+              "--tls-cert-file=${cfg.tlsCertFile}"} \
+            ${optionalString (cfg.tlsKeyFile != null)
+              "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+            ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+            ${cfg.extraOpts}
+          '';
+          WorkingDirectory = top.dataDir;
+        };
+        unitConfig.ConditionPathExists = kubeletPaths;
+      };
+
+      systemd.paths.kubelet = {
+        wantedBy =  [ "kubelet.service" ];
+        pathConfig = {
+          PathExists = kubeletPaths;
+          PathChanged = kubeletPaths;
+        };
+      };
+
+      systemd.services.docker.before = [ "kubelet.service" ];
+
+      systemd.services.docker-seed-images = {
+        wantedBy = [ "docker.service" ];
+        after = [ "docker.service" ];
+        before = [ "kubelet.service" ];
+        path = with pkgs; [ docker ];
+        preStart = ''
+          ${concatMapStrings (img: ''
+            echo "Seeding docker image: ${img}"
+            docker load <${img}
+          '') cfg.seedDockerImages}
+        '';
+        script = "echo Ok";
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+        serviceConfig.Slice = "kubernetes.slice";
+      };
+
+      systemd.services.kubelet-online = {
+        wantedBy = [ "kube-node-online.target" ];
+        after = [ "flannel.target" "kubelet.target" ];
+        before = [ "kube-node-online.target" ];
+        # It is complicated: flannel needs kubelet to run the pause container before
+        # it negotiates the node CIDR with the apiserver, and only afterwards does it
+        # configure and restart dockerd. Until then, prevent creating any pods; they
+        # would have to be recreated anyway, because flannel changes the docker0 network.
+        script = let
+          docker-env = "/run/flannel/docker";
+          flannel-date = "stat --print=%Y ${docker-env}";
+          docker-date = "systemctl show --property=ActiveEnterTimestamp --value docker";
+        in ''
+          until test -f ${docker-env} ; do sleep 1 ; done
+          while test `${flannel-date}` -gt `date +%s --date="$(${docker-date})"` ; do
+            sleep 1
+          done
+        '';
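+        # Illustrative: the loop above compares the mtime of /run/flannel/docker with
+        # docker's ActiveEnterTimestamp and only returns once docker has been restarted
+        # after flannel last rewrote its options.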
+        serviceConfig.Type = "oneshot";
+        serviceConfig.Slice = "kubernetes.slice";
+      };
+
+      # Always include CNI plugins
+      services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins];
+
+      boot.kernelModules = ["br_netfilter"];
+
+      services.kubernetes.kubelet.hostname = with config.networking;
+        mkDefault (hostName + optionalString (!isNull domain) ".${domain}");
+
+      services.kubernetes.pki.certs = with top.lib; {
+        kubelet = mkCert {
+          name = "kubelet";
+          CN = top.kubelet.hostname;
+          action = "systemctl restart kubelet.service";
+
+        };
+        kubeletClient = mkCert {
+          name = "kubelet-client";
+          CN = "system:node:${top.kubelet.hostname}";
+          fields = {
+            O = "system:nodes";
+          };
+          action = "systemctl restart kubelet.service";
+        };
+      };
+
+      services.kubernetes.kubelet.kubeconfig.server = mkDefault top.apiserverAddress;
+    })
+
+    (mkIf (cfg.enable && cfg.manifests != {}) {
+      environment.etc = mapAttrs' (name: manifest:
+        nameValuePair "${manifestPath}/${name}.json" {
+          text = builtins.toJSON manifest;
+          mode = "0755";
+        }
+      ) cfg.manifests;
+    })
+
+    (mkIf (cfg.unschedulable && cfg.enable) {
+      services.kubernetes.kubelet.taints.unschedulable = {
+        value = "true";
+        effect = "NoSchedule";
+      };
+    })
+
+    {
+      systemd.targets.kubelet = {
+        wantedBy = [ "kube-node-online.target" ];
+        before = [ "kube-node-online.target" ];
+      };
+
+      systemd.targets.kube-node-online = {
+        wantedBy = [ "kubernetes.target" ];
+        before = [ "kubernetes.target" ];
+      };
+    }
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix
new file mode 100644
index 000000000000..32eacad9025f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -0,0 +1,407 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.pki;
+
+  csrCA = pkgs.writeText "kube-pki-cacert-csr.json" (builtins.toJSON {
+    key = {
+        algo = "rsa";
+        size = 2048;
+    };
+    names = singleton cfg.caSpec;
+  });
+
+  csrCfssl = pkgs.writeText "kube-pki-cfssl-csr.json" (builtins.toJSON {
+    key = {
+        algo = "rsa";
+        size = 2048;
+    };
+    CN = top.masterAddress;
+  });
+
+  cfsslAPITokenBaseName = "apitoken.secret";
+  cfsslAPITokenPath = "${config.services.cfssl.dataDir}/${cfsslAPITokenBaseName}";
+  certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
+  cfsslAPITokenLength = 32;
+
+  clusterAdminKubeconfig = with cfg.certs.clusterAdmin; {
+    server = top.apiserverAddress;
+    certFile = cert;
+    keyFile = key;
+  };
+
+  remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
+in
+{
+  ###### interface
+  options.services.kubernetes.pki = with lib.types; {
+
+    enable = mkEnableOption "easyCert issuer service";
+
+    certs = mkOption {
+      description = "List of certificate specs to feed to cert generator.";
+      default = {};
+      type = attrs;
+    };
+
+    genCfsslCACert = mkOption {
+      description = ''
+        Whether to automatically generate cfssl CA certificate and key,
+        if they don't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    genCfsslAPICerts = mkOption {
+      description = ''
+        Whether to automatically generate cfssl API webserver TLS cert and key,
+        if they don't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    genCfsslAPIToken = mkOption {
+      description = ''
+        Whether to automatically generate the cfssl API-token secret,
+        if it doesn't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    pkiTrustOnBootstrap = mkOption {
+      description = "Whether to always trust remote cfssl server upon initial PKI bootstrap.";
+      default = true;
+      type = bool;
+    };
+
+    caCertPathPrefix = mkOption {
+      description = ''
+        Path prefix for the CA certificate to be used for cfssl signing.
+        Suffixes ".pem" and "-key.pem" will be automatically appended for
+        the public and private keys respectively.
+      '';
+      default = "${config.services.cfssl.dataDir}/ca";
+      type = str;
+    };
+
+    caSpec = mkOption {
+      description = "Certificate specification for the auto-generated CAcert.";
+      default = {
+        CN = "kubernetes-cluster-ca";
+        O = "NixOS";
+        OU = "services.kubernetes.pki.caSpec";
+        L = "auto-generated";
+      };
+      type = attrs;
+    };
+
+    etcClusterAdminKubeconfig = mkOption {
+      description = ''
+        Symlink a kubeconfig with cluster-admin privileges to environment path
+        (/etc/&lt;path&gt;).
+      '';
+      default = null;
+      type = nullOr str;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable
+  (let
+    cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
+    cfsslCert = "${cfsslCertPathPrefix}.pem";
+    cfsslKey = "${cfsslCertPathPrefix}-key.pem";
+    cfsslPort = toString config.services.cfssl.port;
+
+    certmgrPaths = [
+      top.caFile
+      certmgrAPITokenPath
+    ];
+  in
+  {
+
+    services.cfssl = mkIf (top.apiserver.enable) {
+      enable = true;
+      address = "0.0.0.0";
+      tlsCert = cfsslCert;
+      tlsKey = cfsslKey;
+      configFile = toString (pkgs.writeText "cfssl-config.json" (builtins.toJSON {
+        signing = {
+          profiles = {
+            default = {
+              usages = ["digital signature"];
+              auth_key = "default";
+              expiry = "720h";
+            };
+          };
+        };
+        auth_keys = {
+          default = {
+            type = "standard";
+            key = "file:${cfsslAPITokenPath}";
+          };
+        };
+      }));
+    };
+
+    systemd.services.cfssl.preStart = with pkgs; with config.services.cfssl; mkIf (top.apiserver.enable)
+    (concatStringsSep "\n" [
+      "set -e"
+      (optionalString cfg.genCfsslCACert ''
+        if [ ! -f "${cfg.caCertPathPrefix}.pem" ]; then
+          ${cfssl}/bin/cfssl genkey -initca ${csrCA} | \
+            ${cfssl}/bin/cfssljson -bare ${cfg.caCertPathPrefix}
+        fi
+      '')
+      (optionalString cfg.genCfsslAPICerts ''
+        if [ ! -f "${dataDir}/cfssl.pem" ]; then
+          ${cfssl}/bin/cfssl gencert -ca "${cfg.caCertPathPrefix}.pem" -ca-key "${cfg.caCertPathPrefix}-key.pem" ${csrCfssl} | \
+            ${cfssl}/bin/cfssljson -bare ${cfsslCertPathPrefix}
+        fi
+      '')
+      (optionalString cfg.genCfsslAPIToken ''
+        if [ ! -f "${cfsslAPITokenPath}" ]; then
+          head -c ${toString (cfsslAPITokenLength / 2)} /dev/urandom | od -An -t x | tr -d ' ' >"${cfsslAPITokenPath}"
+        fi
+        chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
+      '')]);
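+
+    # Illustrative: with cfsslAPITokenLength = 32, the snippet above hex-encodes 16
+    # random bytes into a 32-character token; the nixos-kubernetes-node-join script
+    # below enforces the same length when a node joins.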
+
+    systemd.targets.cfssl-online = {
+      wantedBy = [ "network-online.target" ];
+      after = [ "cfssl.service" "network-online.target" "cfssl-online.service" ];
+    };
+
+    systemd.services.cfssl-online = {
+      description = "Wait for ${remote} to be reachable.";
+      wantedBy = [ "cfssl-online.target" ];
+      before = [ "cfssl-online.target" ];
+      path = [ pkgs.curl ];
+      preStart = ''
+        until curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o /dev/null; do
+          echo curl ${remote}/api/v1/cfssl/info: exit status $?
+          sleep 2
+        done
+      '';
+      script = "echo Ok";
+      serviceConfig = {
+        TimeoutSec = "300";
+      };
+    };
+
+    systemd.services.kube-certmgr-bootstrap = {
+      description = "Kubernetes certmgr bootstrapper";
+      wantedBy = [ "cfssl-online.target" ];
+      after = [ "cfssl-online.target" ];
+      before = [ "certmgr.service" ];
+      path = with pkgs; [ curl cfssl ];
+      script = concatStringsSep "\n" [''
+        set -e
+
+        mkdir -p $(dirname ${certmgrAPITokenPath})
+        mkdir -p $(dirname ${top.caFile})
+
+        # If there's a cfssl (cert issuer) running locally, don't rely on the user
+        # to manually paste the token in place; just symlink it.
+        # Otherwise, create the target file, ready for the user to insert the token.
+
+        if [ -f "${cfsslAPITokenPath}" ]; then
+          ln -fs "${cfsslAPITokenPath}" "${certmgrAPITokenPath}"
+        else
+          touch "${certmgrAPITokenPath}" && chmod 600 "${certmgrAPITokenPath}"
+        fi
+      ''
+      (optionalString (cfg.pkiTrustOnBootstrap) ''
+        if [ ! -s "${top.caFile}" ]; then
+          until test -s ${top.caFile}.json; do
+            sleep 2
+            curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o ${top.caFile}.json
+          done
+          cfssljson -f ${top.caFile}.json -stdout >${top.caFile}
+          rm ${top.caFile}.json
+        fi
+      '')
+      ];
+      serviceConfig = {
+        TimeoutSec = "500";
+      };
+    };
+
+    services.certmgr = {
+      enable = true;
+      package = pkgs.certmgr-selfsigned;
+      svcManager = "command";
+      specs =
+        let
+          mkSpec = _: cert: {
+            inherit (cert) action;
+            authority = {
+              inherit remote;
+              file.path = cert.caCert;
+              root_ca = cert.caCert;
+              profile = "default";
+              auth_key_file = certmgrAPITokenPath;
+            };
+            certificate = {
+              path = cert.cert;
+            };
+            private_key = cert.privateKeyOptions;
+            request = {
+              inherit (cert) CN hosts;
+              key = {
+                algo = "rsa";
+                size = 2048;
+              };
+              names = [ cert.fields ];
+            };
+          };
+        in
+          mapAttrs mkSpec cfg.certs;
+      };
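+
+      # Illustrative: for the kubelet cert, mkSpec produces roughly
+      #   { authority = { remote = "https://<masterAddress>:<cfsslPort>"; auth_key_file = <apitoken>; ... };
+      #     certificate.path = "<secretsPath>/kubelet.pem";
+      #     private_key = { owner = "kubernetes"; mode = "0600"; path = "<secretsPath>/kubelet-key.pem"; };
+      #     request = { CN = <hostname>; key = { algo = "rsa"; size = 2048; }; }; }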
+
+      systemd.services.certmgr = {
+        wantedBy = [ "cfssl-online.target" ];
+        after = [ "cfssl-online.target" "kube-certmgr-bootstrap.service" ];
+        preStart = ''
+          while ! test -s ${certmgrAPITokenPath} ; do
+            sleep 1
+            echo Waiting for ${certmgrAPITokenPath}
+          done
+        '';
+        unitConfig.ConditionPathExists = certmgrPaths;
+      };
+
+      systemd.paths.certmgr = {
+        wantedBy = [ "certmgr.service" ];
+        pathConfig = {
+          PathExists = certmgrPaths;
+          PathChanged = certmgrPaths;
+        };
+      };
+
+      environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
+        (top.lib.mkKubeConfig "cluster-admin" clusterAdminKubeconfig);
+
+      environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
+      (pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
+        set -e
+        exec 1>&2
+
+        if [ $# -gt 0 ]; then
+          echo "Usage: $(basename $0)"
+          echo ""
+          echo "No args. Apitoken must be provided on stdin."
+          echo "To get the apitoken, execute: 'sudo cat ${certmgrAPITokenPath}' on the master node."
+          exit 1
+        fi
+
+        if [ $(id -u) != 0 ]; then
+          echo "Run as root please."
+          exit 1
+        fi
+
+        read -r token
+        if [ ''${#token} != ${toString cfsslAPITokenLength} ]; then
+          echo "Token must be of length ${toString cfsslAPITokenLength}."
+          exit 1
+        fi
+
+        do_restart=$(test -s ${certmgrAPITokenPath} && echo -n y || echo -n n)
+
+        echo $token > ${certmgrAPITokenPath}
+        chmod 600 ${certmgrAPITokenPath}
+
+        if [ y = $do_restart ]; then
+          echo "Restarting certmgr..." >&1
+          systemctl restart certmgr
+        fi
+
+        echo "Node joined succesfully" >&1
+      '')];
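+
+      # Illustrative join procedure (run as root; paths assume the default secretsPath):
+      #   master# cat /var/lib/kubernetes/secrets/apitoken.secret
+      #   node#   echo <token> | nixos-kubernetes-node-join
+      # The script stores the token for certmgr and restarts it, so the node's
+      # certificates are requested from the master's cfssl API.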
+
+      # isolate etcd on loopback at the master node;
+      # easyCerts doesn't support multi-master clusters at the moment anyway.
+      services.etcd = mkIf top.apiserver.enable (with cfg.certs.etcd; {
+        listenClientUrls = ["https://127.0.0.1:2379"];
+        listenPeerUrls = ["https://127.0.0.1:2380"];
+        advertiseClientUrls = ["https://etcd.local:2379"];
+        initialCluster = ["${top.masterAddress}=https://etcd.local:2380"];
+        initialAdvertisePeerUrls = ["https://etcd.local:2380"];
+        certFile = mkDefault cert;
+        keyFile = mkDefault key;
+        trustedCaFile = mkDefault caCert;
+      });
+      networking.extraHosts = mkIf (config.services.etcd.enable) ''
+        127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
+      '';
+
+      services.kubernetes = {
+
+        apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
+          etcd = with cfg.certs.apiserverEtcdClient; {
+            servers = ["https://etcd.local:2379"];
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+            caFile = mkDefault caCert;
+          };
+          clientCaFile = mkDefault caCert;
+          tlsCertFile = mkDefault cert;
+          tlsKeyFile = mkDefault key;
+          serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.cert;
+          kubeletClientCaFile = mkDefault caCert;
+          kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert;
+          kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key;
+          proxyClientCertFile = mkDefault cfg.certs.apiserverProxyClient.cert;
+          proxyClientKeyFile = mkDefault cfg.certs.apiserverProxyClient.key;
+        });
+        addonManager = mkIf top.addonManager.enable {
+          kubeconfig = with cfg.certs.addonManager; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+          bootstrapAddonsKubeconfig = clusterAdminKubeconfig;
+        };
+        controllerManager = mkIf top.controllerManager.enable {
+          serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
+          rootCaFile = cfg.certs.controllerManagerClient.caCert;
+          kubeconfig = with cfg.certs.controllerManagerClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        flannel = mkIf top.flannel.enable {
+          kubeconfig = with cfg.certs.flannelClient; {
+            certFile = cert;
+            keyFile = key;
+          };
+        };
+        scheduler = mkIf top.scheduler.enable {
+          kubeconfig = with cfg.certs.schedulerClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        kubelet = mkIf top.kubelet.enable {
+          clientCaFile = mkDefault cfg.certs.kubelet.caCert;
+          tlsCertFile = mkDefault cfg.certs.kubelet.cert;
+          tlsKeyFile = mkDefault cfg.certs.kubelet.key;
+          kubeconfig = with cfg.certs.kubeletClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        proxy = mkIf top.proxy.enable {
+          kubeconfig = with cfg.certs.kubeProxyClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+      };
+    });
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/proxy.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/proxy.nix
new file mode 100644
index 000000000000..23f4d97b7030
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/proxy.nix
@@ -0,0 +1,107 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.proxy;
+in
+{
+
+  ###### interface
+  options.services.kubernetes.proxy = with lib.types; {
+
+    bindAddress = mkOption {
+      description = "Kubernetes proxy listening address.";
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    enable = mkEnableOption "Kubernetes proxy";
+
+    extraOpts = mkOption {
+      description = "Kubernetes proxy extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes proxy";
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = let
+
+    proxyPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+    ];
+
+  in mkIf cfg.enable {
+    systemd.services.kube-proxy = rec {
+      description = "Kubernetes Proxy Service";
+      wantedBy = [ "kube-node-online.target" ];
+      after = [ "kubelet-online.service" ];
+      before = [ "kube-node-online.target" ];
+      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig;
+      path = with pkgs; [ iptables conntrack_tools kubectl ];
+      preStart = ''
+        until kubectl auth can-i get nodes/${top.kubelet.hostname} -q 2>/dev/null; do
+          echo kubectl auth can-i get nodes/${top.kubelet.hostname}: exit status $?
+          sleep 2
+        done
+      '';
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-proxy \
+          --bind-address=${cfg.bindAddress} \
+          ${optionalString (top.clusterCidr!=null)
+            "--cluster-cidr=${top.clusterCidr}"} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --kubeconfig=${environment.KUBECONFIG} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+        Restart = "on-failure";
+        RestartSec = 5;
+      };
+      unitConfig.ConditionPathExists = proxyPaths;
+    };
+
+    systemd.paths.kube-proxy = {
+      wantedBy = [ "kube-proxy.service" ];
+      pathConfig = {
+        PathExists = proxyPaths;
+        PathChanged = proxyPaths;
+      };
+    };
+
+    services.kubernetes.pki.certs = {
+      kubeProxyClient = top.lib.mkCert {
+        name = "kube-proxy-client";
+        CN = "system:kube-proxy";
+        action = "systemctl restart kube-proxy.service";
+      };
+    };
+
+    services.kubernetes.proxy.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/scheduler.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/scheduler.nix
new file mode 100644
index 000000000000..a0e484542951
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/scheduler.nix
@@ -0,0 +1,120 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.scheduler;
+in
+{
+  ###### interface
+  options.services.kubernetes.scheduler = with lib.types; {
+
+    address = mkOption {
+      description = "Kubernetes scheduler listening address.";
+      default = "127.0.0.1";
+      type = str;
+    };
+
+    enable = mkEnableOption "Kubernetes scheduler";
+
+    extraOpts = mkOption {
+      description = "Kubernetes scheduler extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes scheduler";
+
+    leaderElect = mkOption {
+      description = "Whether to start leader election before executing main loop.";
+      type = bool;
+      default = true;
+    };
+
+    port = mkOption {
+      description = "Kubernetes scheduler listening port.";
+      default = 10251;
+      type = int;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config =  let
+
+    schedulerPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+    ];
+
+  in mkIf cfg.enable {
+    systemd.services.kube-scheduler = rec {
+      description = "Kubernetes Scheduler Service";
+      wantedBy = [ "kube-control-plane-online.target" ];
+      after = [ "kube-apiserver.service" ];
+      before = [ "kube-control-plane-online.target" ];
+      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig;
+      path = [ pkgs.kubectl ];
+      preStart = ''
+        until kubectl auth can-i get /api -q 2>/dev/null; do
+          echo kubectl auth can-i get /api: exit status $?
+          sleep 2
+        done
+      '';
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-scheduler \
+          --address=${cfg.address} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --kubeconfig=${environment.KUBECONFIG} \
+          --leader-elect=${boolToString cfg.leaderElect} \
+          --port=${toString cfg.port} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+        Restart = "on-failure";
+        RestartSec = 5;
+      };
+      unitConfig.ConditionPathExists = schedulerPaths;
+    };
+
+    systemd.paths.kube-scheduler = {
+      wantedBy = [ "kube-scheduler.service" ];
+      pathConfig = {
+        PathExists = schedulerPaths;
+        PathChanged = schedulerPaths;
+      };
+    };
+
+    services.kubernetes.pki.certs = {
+      schedulerClient = top.lib.mkCert {
+        name = "kube-scheduler-client";
+        CN = "system:kube-scheduler";
+        action = "systemctl restart kube-scheduler.service";
+      };
+    };
+
+    services.kubernetes.scheduler.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+}