Diffstat (limited to 'nixos/modules')
-rw-r--r--  nixos/modules/config/gtk/gtk-icon-cache.nix                      |   2
-rw-r--r--  nixos/modules/installer/tools/nix-fallback-paths.nix             |   8
-rw-r--r--  nixos/modules/module-list.nix                                    |   1
-rw-r--r--  nixos/modules/programs/plotinus.nix                              |   2
-rw-r--r--  nixos/modules/programs/plotinus.xml                              |   4
-rw-r--r--  nixos/modules/rename.nix                                         |   1
-rw-r--r--  nixos/modules/services/cluster/kubernetes/addon-manager.nix      |  85
-rw-r--r--  nixos/modules/services/cluster/kubernetes/addons/dashboard.nix   |  36
-rw-r--r--  nixos/modules/services/cluster/kubernetes/apiserver.nix          |  48
-rw-r--r--  nixos/modules/services/cluster/kubernetes/controller-manager.nix |  39
-rw-r--r--  nixos/modules/services/cluster/kubernetes/default.nix            |  25
-rw-r--r--  nixos/modules/services/cluster/kubernetes/flannel.nix            |  73
-rw-r--r--  nixos/modules/services/cluster/kubernetes/kubelet.nix            |  93
-rw-r--r--  nixos/modules/services/cluster/kubernetes/pki.nix                | 166
-rw-r--r--  nixos/modules/services/cluster/kubernetes/proxy.nix              |  37
-rw-r--r--  nixos/modules/services/cluster/kubernetes/scheduler.nix          |  34
-rw-r--r--  nixos/modules/services/databases/postgresql.nix                  |   4
-rw-r--r--  nixos/modules/services/editors/emacs.xml                         |   6
-rw-r--r--  nixos/modules/services/misc/zookeeper.nix                        |   1
-rw-r--r--  nixos/modules/services/network-filesystems/ceph.nix              | 110
-rw-r--r--  nixos/modules/services/web-servers/darkhttpd.nix                 |   2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/enlightenment.nix    |   4
-rw-r--r--  nixos/modules/services/x11/desktop-managers/mate.nix             |   2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/xfce.nix             |   6
-rw-r--r--  nixos/modules/services/x11/desktop-managers/xfce4-14.nix         |   4
-rw-r--r--  nixos/modules/virtualisation/amazon-image.nix                    |  14
-rw-r--r--  nixos/modules/virtualisation/amazon-options.nix                  |   9
-rw-r--r--  nixos/modules/virtualisation/railcar.nix                         | 125
28 files changed, 378 insertions(+), 563 deletions(-)
diff --git a/nixos/modules/config/gtk/gtk-icon-cache.nix b/nixos/modules/config/gtk/gtk-icon-cache.nix
index 9c5d993b9c59..86a6bfb5af41 100644
--- a/nixos/modules/config/gtk/gtk-icon-cache.nix
+++ b/nixos/modules/config/gtk/gtk-icon-cache.nix
@@ -7,7 +7,7 @@ with lib;
       type = types.bool;
       default = config.services.xserver.enable;
       description = ''
-        Whether to build icon theme caches for GTK+ applications.
+        Whether to build icon theme caches for GTK applications.
       '';
     };
   };
diff --git a/nixos/modules/installer/tools/nix-fallback-paths.nix b/nixos/modules/installer/tools/nix-fallback-paths.nix
index b9ab2053c41f..2673887d2b96 100644
--- a/nixos/modules/installer/tools/nix-fallback-paths.nix
+++ b/nixos/modules/installer/tools/nix-fallback-paths.nix
@@ -1,6 +1,6 @@
 {
-  x86_64-linux = "/nix/store/hbhdjn5ik3byg642d1m11k3k3s0kn3py-nix-2.2.2";
-  i686-linux = "/nix/store/fz5cikwvj3n0a6zl44h6l2z3cin64mda-nix-2.2.2";
-  aarch64-linux = "/nix/store/2gba4cyl4wvxzfbhmli90jy4n5aj0kjj-nix-2.2.2";
-  x86_64-darwin = "/nix/store/87i4fp46jfw9yl8c7i9gx75m5yph7irl-nix-2.2.2";
+  x86_64-linux = "/nix/store/3ds3cgji9vjxdbgp10av6smyym1126d1-nix-2.3";
+  i686-linux = "/nix/store/ln1ndqvfpc9cdl03vqxi6kvlxm9wfv9g-nix-2.3";
+  aarch64-linux = "/nix/store/n8a1rwzrp20qcr2c4hvyn6c5q9zx8csw-nix-2.3";
+  x86_64-darwin = "/nix/store/jq6npmpld02sz4rgniz0qrsdfnm6j17a-nix-2.3";
 }
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 290c29993b58..fe28cf7fa492 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -948,6 +948,7 @@
   ./virtualisation/openvswitch.nix
   ./virtualisation/parallels-guest.nix
   ./virtualisation/qemu-guest-agent.nix
+  ./virtualisation/railcar.nix
   ./virtualisation/rkt.nix
   ./virtualisation/virtualbox-guest.nix
   ./virtualisation/virtualbox-host.nix
diff --git a/nixos/modules/programs/plotinus.nix b/nixos/modules/programs/plotinus.nix
index 065e72d6c374..e3549c79588b 100644
--- a/nixos/modules/programs/plotinus.nix
+++ b/nixos/modules/programs/plotinus.nix
@@ -18,7 +18,7 @@ in
       enable = mkOption {
         default = false;
         description = ''
-          Whether to enable the Plotinus GTK+3 plugin.  Plotinus provides a
+          Whether to enable the Plotinus GTK 3 plugin. Plotinus provides a
           popup (triggered by Ctrl-Shift-P) to search the menus of a
           compatible application.
         '';
diff --git a/nixos/modules/programs/plotinus.xml b/nixos/modules/programs/plotinus.xml
index 902cd89e0c49..8fc8c22c6d76 100644
--- a/nixos/modules/programs/plotinus.xml
+++ b/nixos/modules/programs/plotinus.xml
@@ -13,10 +13,10 @@
   <link xlink:href="https://github.com/p-e-w/plotinus"/>
  </para>
  <para>
-  Plotinus is a searchable command palette in every modern GTK+ application.
+  Plotinus is a searchable command palette in every modern GTK application.
  </para>
  <para>
-  When in a GTK+3 application and Plotinus is enabled, you can press
+  When in a GTK 3 application and Plotinus is enabled, you can press
   <literal>Ctrl+Shift+P</literal> to open the command palette. The command
  palette provides a searchable list of all menu items in the application.
  </para>
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index 9e0ab60ca679..1fa91f05030d 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -34,6 +34,7 @@ with lib;
     (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "caFile" ] [ "services" "kubernetes" "apiserver" "etcd" "caFile" ])
     (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "applyManifests" ] "")
     (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "cadvisorPort" ] "")
+    (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "allowPrivileged" ] "")
     (mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
     (mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "")
     (mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])
diff --git a/nixos/modules/services/cluster/kubernetes/addon-manager.nix b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
index ad7d17c9c283..17f2dde31a71 100644
--- a/nixos/modules/services/cluster/kubernetes/addon-manager.nix
+++ b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
@@ -62,50 +62,19 @@ in
       '';
     };
 
-    enable = mkEnableOption "Kubernetes addon manager";
-
-    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager";
-    bootstrapAddonsKubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager bootstrap";
+    enable = mkEnableOption "Whether to enable Kubernetes addon manager.";
   };
 
   ###### implementation
-  config = let
-
-    addonManagerPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-    bootstrapAddonsPaths = filter (a: a != null) [
-      cfg.bootstrapAddonsKubeconfig.caFile
-      cfg.bootstrapAddonsKubeconfig.certFile
-      cfg.bootstrapAddonsKubeconfig.keyFile
-    ];
-
-  in mkIf cfg.enable {
+  config = mkIf cfg.enable {
     environment.etc."kubernetes/addons".source = "${addons}/";
 
-    #TODO: Get rid of kube-addon-manager in the future for the following reasons
-    # - it is basically just a shell script wrapped around kubectl
-    # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
-    # - it is designed to be used with k8s system components only
-    # - it would be better with a more Nix-oriented way of managing addons
     systemd.services.kube-addon-manager = {
       description = "Kubernetes addon manager";
       wantedBy = [ "kubernetes.target" ];
-      after = [ "kube-node-online.target" ];
-      before = [ "kubernetes.target" ];
-      environment = {
-        ADDON_PATH = "/etc/kubernetes/addons/";
-        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager" cfg.kubeconfig;
-      };
-      path = with pkgs; [ gawk kubectl ];
-      preStart = ''
-        until kubectl -n kube-system get serviceaccounts/default 2>/dev/null; do
-          echo kubectl -n kube-system get serviceaccounts/default: exit status $?
-          sleep 2
-        done
-      '';
+      after = [ "kube-apiserver.service" ];
+      environment.ADDON_PATH = "/etc/kubernetes/addons/";
+      path = [ pkgs.gawk ];
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = "${top.package}/bin/kube-addons";
@@ -115,52 +84,8 @@ in
         Restart = "on-failure";
         RestartSec = 10;
       };
-      unitConfig.ConditionPathExists = addonManagerPaths;
     };
 
-    systemd.paths.kube-addon-manager = {
-      wantedBy = [ "kube-addon-manager.service" ];
-      pathConfig = {
-        PathExists = addonManagerPaths;
-        PathChanged = addonManagerPaths;
-      };
-    };
-
-    services.kubernetes.addonManager.kubeconfig.server = mkDefault top.apiserverAddress;
-
-    systemd.services.kube-addon-manager-bootstrap = mkIf (top.apiserver.enable && top.addonManager.bootstrapAddons != {}) {
-      wantedBy = [ "kube-control-plane-online.target" ];
-      after = [ "kube-apiserver.service" ];
-      before = [ "kube-control-plane-online.target" ];
-      path = [ pkgs.kubectl ];
-      environment = {
-        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager-bootstrap" cfg.bootstrapAddonsKubeconfig;
-      };
-      preStart = with pkgs; let
-        files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
-          cfg.bootstrapAddons;
-      in ''
-        until kubectl auth can-i '*' '*' -q 2>/dev/null; do
-          echo kubectl auth can-i '*' '*': exit status $?
-          sleep 2
-        done
-
-        kubectl apply -f ${concatStringsSep " \\\n -f " files}
-      '';
-      script = "echo Ok";
-      unitConfig.ConditionPathExists = bootstrapAddonsPaths;
-    };
-
-    systemd.paths.kube-addon-manager-bootstrap = {
-      wantedBy = [ "kube-addon-manager-bootstrap.service" ];
-      pathConfig = {
-        PathExists = bootstrapAddonsPaths;
-        PathChanged = bootstrapAddonsPaths;
-      };
-    };
-
-    services.kubernetes.addonManager.bootstrapAddonsKubeconfig.server = mkDefault top.apiserverAddress;
-
     services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
     (let
      name = "system:kube-addon-manager";
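
For reference, bootstrapAddons entries are plain Kubernetes manifests expressed as Nix
attrsets, as the dashboard addon below illustrates; a hedged sketch of declaring one
(the namespace name is hypothetical):

    services.kubernetes.addonManager.bootstrapAddons.example-ns = {
      apiVersion = "v1";
      kind = "Namespace";
      metadata.name = "example";
    };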
diff --git a/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
index 5117726bee99..70f96d75a461 100644
--- a/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
+++ b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
@@ -169,23 +169,6 @@ in {
         };
       };
 
-      kubernetes-dashboard-cm = {
-        apiVersion = "v1";
-        kind = "ConfigMap";
-        metadata = {
-          labels = {
-            k8s-app = "kubernetes-dashboard";
-            # Allows editing resource and makes sure it is created first.
-            "addonmanager.kubernetes.io/mode" = "EnsureExists";
-          };
-          name = "kubernetes-dashboard-settings";
-          namespace = "kube-system";
-        };
-      };
-    };
-
-    services.kubernetes.addonManager.bootstrapAddons = mkMerge [{
-
       kubernetes-dashboard-sa = {
         apiVersion = "v1";
         kind = "ServiceAccount";
@@ -227,9 +210,20 @@ in {
         };
         type = "Opaque";
       };
-    }
-
-    (optionalAttrs cfg.rbac.enable
+      kubernetes-dashboard-cm = {
+        apiVersion = "v1";
+        kind = "ConfigMap";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
+          };
+          name = "kubernetes-dashboard-settings";
+          namespace = "kube-system";
+        };
+      };
+    } // (optionalAttrs cfg.rbac.enable
       (let
         subjects = [{
           kind = "ServiceAccount";
@@ -329,6 +323,6 @@ in {
             inherit subjects;
           };
         })
-    ))];
+    ));
   };
 }
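
A minimal evaluable sketch of the merge pattern this hunk switches to: optionalAttrs
yields an empty set when its condition is false, so // adds the RBAC objects only
when enabled (names below are stand-ins):

    let
      lib = (import <nixpkgs> {}).lib;
      rbacEnabled = true;  # stand-in for cfg.rbac.enable
    in
      { always-present = 1; } // lib.optionalAttrs rbacEnabled { only-with-rbac = 2; }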
diff --git a/nixos/modules/services/cluster/kubernetes/apiserver.nix b/nixos/modules/services/cluster/kubernetes/apiserver.nix
index f293dd79f42a..33796bf2e080 100644
--- a/nixos/modules/services/cluster/kubernetes/apiserver.nix
+++ b/nixos/modules/services/cluster/kubernetes/apiserver.nix
@@ -290,32 +290,11 @@ in
   ###### implementation
   config = mkMerge [
 
-    (let
-
-      apiserverPaths = filter (a: a != null) [
-        cfg.clientCaFile
-        cfg.etcd.caFile
-        cfg.etcd.certFile
-        cfg.etcd.keyFile
-        cfg.kubeletClientCaFile
-        cfg.kubeletClientCertFile
-        cfg.kubeletClientKeyFile
-        cfg.serviceAccountKeyFile
-        cfg.tlsCertFile
-        cfg.tlsKeyFile
-      ];
-      etcdPaths = filter (a: a != null) [
-        config.services.etcd.trustedCaFile
-        config.services.etcd.certFile
-        config.services.etcd.keyFile
-      ];
-
-    in mkIf cfg.enable {
+    (mkIf cfg.enable {
         systemd.services.kube-apiserver = {
           description = "Kubernetes APIServer Service";
-          wantedBy = [ "kube-control-plane-online.target" ];
-          after = [ "certmgr.service" ];
-          before = [ "kube-control-plane-online.target" ];
+          wantedBy = [ "kubernetes.target" ];
+          after = [ "network.target" ];
           serviceConfig = {
             Slice = "kubernetes.slice";
             ExecStart = ''${top.package}/bin/kube-apiserver \
@@ -386,15 +365,6 @@ in
             Restart = "on-failure";
             RestartSec = 5;
           };
-          unitConfig.ConditionPathExists = apiserverPaths;
-        };
-
-        systemd.paths.kube-apiserver = mkIf top.apiserver.enable {
-          wantedBy = [ "kube-apiserver.service" ];
-          pathConfig = {
-            PathExists = apiserverPaths;
-            PathChanged = apiserverPaths;
-          };
         };
 
         services.etcd = {
@@ -408,18 +378,6 @@ in
           initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
         };
 
-        systemd.services.etcd = {
-          unitConfig.ConditionPathExists = etcdPaths;
-        };
-
-        systemd.paths.etcd = {
-          wantedBy = [ "etcd.service" ];
-          pathConfig = {
-            PathExists = etcdPaths;
-            PathChanged = etcdPaths;
-          };
-        };
-
         services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
 
           apiserver-kubelet-api-admin-crb = {
diff --git a/nixos/modules/services/cluster/kubernetes/controller-manager.nix b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
index b94e8bd86d4c..0b73d090f241 100644
--- a/nixos/modules/services/cluster/kubernetes/controller-manager.nix
+++ b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
@@ -104,31 +104,11 @@ in
   };
 
   ###### implementation
-  config = let
-
-    controllerManagerPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-      cfg.rootCaFile
-      cfg.serviceAccountKeyFile
-      cfg.tlsCertFile
-      cfg.tlsKeyFile
-    ];
-
-  in mkIf cfg.enable {
-    systemd.services.kube-controller-manager = rec {
+  config = mkIf cfg.enable {
+    systemd.services.kube-controller-manager = {
       description = "Kubernetes Controller Manager Service";
-      wantedBy = [ "kube-control-plane-online.target" ];
+      wantedBy = [ "kubernetes.target" ];
       after = [ "kube-apiserver.service" ];
-      before = [ "kube-control-plane-online.target" ];
-      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig;
-      preStart = ''
-        until kubectl auth can-i get /api -q 2>/dev/null; do
-          echo kubectl auth can-i get /api: exit status $?
-          sleep 2
-        done
-      '';
       serviceConfig = {
         RestartSec = "30s";
         Restart = "on-failure";
@@ -140,7 +120,7 @@ in
             "--cluster-cidr=${cfg.clusterCidr}"} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${environment.KUBECONFIG} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
           --leader-elect=${boolToString cfg.leaderElect} \
           ${optionalString (cfg.rootCaFile!=null)
             "--root-ca-file=${cfg.rootCaFile}"} \
@@ -161,16 +141,7 @@ in
         User = "kubernetes";
         Group = "kubernetes";
       };
-      path = top.path ++ [ pkgs.kubectl ];
-      unitConfig.ConditionPathExists = controllerManagerPaths;
-    };
-
-    systemd.paths.kube-controller-manager = {
-      wantedBy = [ "kube-controller-manager.service" ];
-      pathConfig = {
-        PathExists = controllerManagerPaths;
-        PathChanged = controllerManagerPaths;
-      };
+      path = top.path;
     };
 
     services.kubernetes.pki.certs = with top.lib; {
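
Note the pattern change in ExecStart: the removed code declared the unit as a rec
attrset so the command line could reference the unit's own environment, while the
revert inlines the generated kubeconfig path. A minimal sketch of the removed style
(the unit name is hypothetical); the same substitution recurs in the kube-proxy and
kube-scheduler hunks below:

    systemd.services.example = rec {
      environment.KUBECONFIG = "/etc/kubernetes/example.kubeconfig";
      serviceConfig.ExecStart = "example-daemon --kubeconfig=${environment.KUBECONFIG}";
    };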
diff --git a/nixos/modules/services/cluster/kubernetes/default.nix b/nixos/modules/services/cluster/kubernetes/default.nix
index 823cc1c35f4e..3790ac9b6918 100644
--- a/nixos/modules/services/cluster/kubernetes/default.nix
+++ b/nixos/modules/services/cluster/kubernetes/default.nix
@@ -256,29 +256,6 @@ in {
         wantedBy = [ "multi-user.target" ];
       };
 
-      systemd.targets.kube-control-plane-online = {
-        wantedBy = [ "kubernetes.target" ];
-        before = [ "kubernetes.target" ];
-      };
-
-      systemd.services.kube-control-plane-online = {
-        description = "Kubernetes control plane is online";
-        wantedBy = [ "kube-control-plane-online.target" ];
-        after = [ "kube-scheduler.service" "kube-controller-manager.service" ];
-        before = [ "kube-control-plane-online.target" ];
-        path = [ pkgs.curl ];
-        preStart = ''
-          until curl -Ssf ${cfg.apiserverAddress}/healthz do
-            echo curl -Ssf ${cfg.apiserverAddress}/healthz: exit status $?
-            sleep 3
-          done
-        '';
-        script = "echo Ok";
-        serviceConfig = {
-          TimeoutSec = "500";
-        };
-      };
-
       systemd.tmpfiles.rules = [
         "d /opt/cni/bin 0755 root root -"
         "d /run/kubernetes 0755 kubernetes kubernetes -"
@@ -302,8 +279,6 @@ in {
       services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null
                           then cfg.apiserver.advertiseAddress
                           else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}");
-
-      services.kubernetes.kubeconfig.server = mkDefault cfg.apiserverAddress;
     })
   ];
 }
diff --git a/nixos/modules/services/cluster/kubernetes/flannel.nix b/nixos/modules/services/cluster/kubernetes/flannel.nix
index d9437427d6d1..74d10d684375 100644
--- a/nixos/modules/services/cluster/kubernetes/flannel.nix
+++ b/nixos/modules/services/cluster/kubernetes/flannel.nix
@@ -14,36 +14,25 @@ let
     buildInputs = [ pkgs.makeWrapper ];
   } ''
     mkdir -p $out
-    cp ${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh $out/mk-docker-opts.sh
 
     # bashInteractive needed for `compgen`
-    makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "$out/mk-docker-opts.sh"
+    makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "${pkgs.kubernetes}/bin/mk-docker-opts.sh"
   '';
 in
 {
   ###### interface
   options.services.kubernetes.flannel = {
-    enable = mkEnableOption "flannel networking";
-    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes flannel";
+    enable = mkEnableOption "enable flannel networking";
   };
 
   ###### implementation
-  config = let
-
-    flannelPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-    kubeconfig = top.lib.mkKubeConfig "flannel" cfg.kubeconfig;
-
-  in mkIf cfg.enable {
+  config = mkIf cfg.enable {
     services.flannel = {
 
       enable = mkDefault true;
       network = mkDefault top.clusterCidr;
-      inherit storageBackend kubeconfig;
-      nodeName = top.kubelet.hostname;
+      inherit storageBackend;
+      nodeName = config.services.kubernetes.kubelet.hostname;
     };
 
     services.kubernetes.kubelet = {
@@ -58,66 +47,24 @@ in
       }];
     };
 
-    systemd.services.mk-docker-opts = {
+    systemd.services."mk-docker-opts" = {
       description = "Pre-Docker Actions";
-      wantedBy = [ "flannel.target" ];
-      before = [ "flannel.target" ];
       path = with pkgs; [ gawk gnugrep ];
       script = ''
         ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
         systemctl restart docker
       '';
-      unitConfig.ConditionPathExists = [ "/run/flannel/subnet.env" ];
       serviceConfig.Type = "oneshot";
     };
 
-    systemd.paths.flannel-subnet-env = {
-      wantedBy = [ "mk-docker-opts.service" ];
-      pathConfig = {
-        PathExists = [ "/run/flannel/subnet.env" ];
-        PathChanged = [ "/run/flannel/subnet.env" ];
-        Unit = "mk-docker-opts.service";
-      };
-    };
-
-    systemd.targets.flannel = {
-      wantedBy = [ "kube-node-online.target" ];
-      before = [ "kube-node-online.target" ];
-    };
-
-    systemd.services.flannel = {
-      wantedBy = [ "flannel.target" ];
-      after = [ "kubelet.target" ];
-      before = [ "flannel.target" ];
-      path = with pkgs; [ iptables kubectl ];
-      environment.KUBECONFIG = kubeconfig;
-      preStart = let
-        args = [
-          "--selector=kubernetes.io/hostname=${top.kubelet.hostname}"
-          # flannel exits if node is not registered yet, before that there is no podCIDR
-          "--output=jsonpath={.items[0].spec.podCIDR}"
-          # if jsonpath cannot be resolved exit with status 1
-          "--allow-missing-template-keys=false"
-        ];
-      in ''
-        until kubectl get nodes ${concatStringsSep " " args} 2>/dev/null; do
-          echo Waiting for ${top.kubelet.hostname} to be RegisteredNode
-          sleep 1
-        done
-      '';
-      unitConfig.ConditionPathExists = flannelPaths;
-    };
-
-    systemd.paths.flannel = {
+    systemd.paths."flannel-subnet-env" = {
       wantedBy = [ "flannel.service" ];
       pathConfig = {
-        PathExists = flannelPaths;
-        PathChanged = flannelPaths;
+        PathModified = "/run/flannel/subnet.env";
+        Unit = "mk-docker-opts.service";
       };
     };
 
-    services.kubernetes.flannel.kubeconfig.server = mkDefault top.apiserverAddress;
-
     systemd.services.docker = {
       environment.DOCKER_OPTS = "-b none";
       serviceConfig.EnvironmentFile = "-/run/flannel/docker";
@@ -144,6 +91,7 @@ in
 
    # give flannel some kubernetes rbac permissions if applicable
     services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
+
       flannel-cr = {
         apiVersion = "rbac.authorization.k8s.io/v1beta1";
         kind = "ClusterRole";
@@ -179,6 +127,7 @@ in
           name = "flannel-client";
         }];
       };
+
     };
   };
 }
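
The flannel-subnet-env path unit above follows the standard systemd pattern of firing
a oneshot service when a file changes; a minimal standalone sketch (unit and file
names are hypothetical):

    systemd.paths.watch-config = {
      wantedBy = [ "multi-user.target" ];
      pathConfig = {
        PathModified = "/run/example/config.env";
        Unit = "regenerate-config.service";  # the oneshot service to trigger
      };
    };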
diff --git a/nixos/modules/services/cluster/kubernetes/kubelet.nix b/nixos/modules/services/cluster/kubernetes/kubelet.nix
index 4c5df96bcc6a..250da4c807ec 100644
--- a/nixos/modules/services/cluster/kubernetes/kubelet.nix
+++ b/nixos/modules/services/cluster/kubernetes/kubelet.nix
@@ -61,12 +61,6 @@ in
       type = str;
     };
 
-    allowPrivileged = mkOption {
-      description = "Whether to allow Kubernetes containers to request privileged mode.";
-      default = false;
-      type = bool;
-    };
-
     clusterDns = mkOption {
       description = "Use alternative DNS.";
       default = "10.1.0.1";
@@ -234,28 +228,21 @@ in
 
   ###### implementation
   config = mkMerge [
-    (let
-
-      kubeletPaths = filter (a: a != null) [
-        cfg.kubeconfig.caFile
-        cfg.kubeconfig.certFile
-        cfg.kubeconfig.keyFile
-        cfg.clientCaFile
-        cfg.tlsCertFile
-        cfg.tlsKeyFile
-      ];
-
-    in mkIf cfg.enable {
+    (mkIf cfg.enable {
       services.kubernetes.kubelet.seedDockerImages = [infraContainer];
 
       systemd.services.kubelet = {
         description = "Kubernetes Kubelet Service";
-        wantedBy = [ "kubelet.target" ];
-        after = [ "kube-control-plane-online.target" ];
-        before = [ "kubelet.target" ];
+        wantedBy = [ "kubernetes.target" ];
+        after = [ "network.target" "docker.service" "kube-apiserver.service" ];
         path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
         preStart = ''
-          rm -f /opt/cni/bin/* || true
+          ${concatMapStrings (img: ''
+            echo "Seeding docker image: ${img}"
+            docker load <${img}
+          '') cfg.seedDockerImages}
+
+          rm /opt/cni/bin/* || true
           ${concatMapStrings (package: ''
             echo "Linking cni package: ${package}"
             ln -fs ${package}/bin/* /opt/cni/bin
@@ -269,7 +256,6 @@ in
           RestartSec = "1000ms";
           ExecStart = ''${top.package}/bin/kubelet \
             --address=${cfg.address} \
-            --allow-privileged=${boolToString cfg.allowPrivileged} \
             --authentication-token-webhook \
             --authentication-token-webhook-cache-ttl="10s" \
             --authorization-mode=Webhook \
@@ -308,56 +294,6 @@ in
           '';
           WorkingDirectory = top.dataDir;
         };
-        unitConfig.ConditionPathExists = kubeletPaths;
-      };
-
-      systemd.paths.kubelet = {
-        wantedBy =  [ "kubelet.service" ];
-        pathConfig = {
-          PathExists = kubeletPaths;
-          PathChanged = kubeletPaths;
-        };
-      };
-
-      systemd.services.docker.before = [ "kubelet.service" ];
-
-      systemd.services.docker-seed-images = {
-        wantedBy = [ "docker.service" ];
-        after = [ "docker.service" ];
-        before = [ "kubelet.service" ];
-        path = with pkgs; [ docker ];
-        preStart = ''
-          ${concatMapStrings (img: ''
-            echo "Seeding docker image: ${img}"
-            docker load <${img}
-          '') cfg.seedDockerImages}
-        '';
-        script = "echo Ok";
-        serviceConfig.Type = "oneshot";
-        serviceConfig.RemainAfterExit = true;
-        serviceConfig.Slice = "kubernetes.slice";
-      };
-
-      systemd.services.kubelet-online = {
-        wantedBy = [ "kube-node-online.target" ];
-        after = [ "flannel.target" "kubelet.target" ];
-        before = [ "kube-node-online.target" ];
-        # it is complicated. flannel needs kubelet to run the pause container before
-        # it discusses the node CIDR with apiserver and afterwards configures and restarts
-        # dockerd. Until then prevent creating any pods because they have to be recreated anyway
-        # because the network of docker0 has been changed by flannel.
-        script = let
-          docker-env = "/run/flannel/docker";
-          flannel-date = "stat --print=%Y ${docker-env}";
-          docker-date = "systemctl show --property=ActiveEnterTimestamp --value docker";
-        in ''
-          until test -f ${docker-env} ; do sleep 1 ; done
-          while test `${flannel-date}` -gt `date +%s --date="$(${docker-date})"` ; do
-            sleep 1
-          done
-        '';
-        serviceConfig.Type = "oneshot";
-        serviceConfig.Slice = "kubernetes.slice";
       };
 
       # Always include cni plugins
@@ -404,16 +340,5 @@ in
       };
     })
 
-    {
-      systemd.targets.kubelet = {
-        wantedBy = [ "kube-node-online.target" ];
-        before = [ "kube-node-online.target" ];
-      };
-
-      systemd.targets.kube-node-online = {
-        wantedBy = [ "kubernetes.target" ];
-        before = [ "kubernetes.target" ];
-      };
-    }
   ];
 }
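
The seedDockerImages loop now inlined into preStart expects tarballs consumable by
`docker load`. A hedged sketch of producing one with dockerTools (image name and
contents are assumptions):

    services.kubernetes.kubelet.seedDockerImages = [
      (pkgs.dockerTools.buildImage {
        name = "example/pause";
        tag = "latest";
        config.Cmd = [ "/bin/pause" ];  # assumes a pause binary in the image contents
      })
    ];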
diff --git a/nixos/modules/services/cluster/kubernetes/pki.nix b/nixos/modules/services/cluster/kubernetes/pki.nix
index 47384ae50a07..733479e24c97 100644
--- a/nixos/modules/services/cluster/kubernetes/pki.nix
+++ b/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -27,11 +27,12 @@ let
   certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
   cfsslAPITokenLength = 32;
 
-  clusterAdminKubeconfig = with cfg.certs.clusterAdmin; {
-    server = top.apiserverAddress;
-    certFile = cert;
-    keyFile = key;
-  };
+  clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
+    top.lib.mkKubeConfig "cluster-admin" {
+        server = top.apiserverAddress;
+        certFile = cert;
+        keyFile = key;
+    };
 
   remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
 in
@@ -118,11 +119,6 @@ in
     cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
     cfsslCert = "${cfsslCertPathPrefix}.pem";
     cfsslKey = "${cfsslCertPathPrefix}-key.pem";
-
-    certmgrPaths = [
-      top.caFile
-      certmgrAPITokenPath
-    ];
   in
   {
 
@@ -172,40 +168,13 @@ in
         chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
       '')]);
 
-    systemd.targets.cfssl-online = {
-      wantedBy = [ "network-online.target" ];
-      after = [ "cfssl.service" "network-online.target" "cfssl-online.service" ];
-    };
-
-    systemd.services.cfssl-online = {
-      description = "Wait for ${remote} to be reachable.";
-      wantedBy = [ "cfssl-online.target" ];
-      before = [ "cfssl-online.target" ];
-      path = [ pkgs.curl ];
-      preStart = ''
-        until curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o /dev/null; do
-          echo curl ${remote}/api/v1/cfssl/info: exit status $?
-          sleep 2
-        done
-      '';
-      script = "echo Ok";
-      serviceConfig = {
-        TimeoutSec = "300";
-      };
-    };
-
     systemd.services.kube-certmgr-bootstrap = {
       description = "Kubernetes certmgr bootstrapper";
-      wantedBy = [ "cfssl-online.target" ];
-      after = [ "cfssl-online.target" ];
-      before = [ "certmgr.service" ];
-      path = with pkgs; [ curl cfssl ];
+      wantedBy = [ "certmgr.service" ];
+      after = [ "cfssl.target" ];
       script = concatStringsSep "\n" [''
         set -e
 
-        mkdir -p $(dirname ${certmgrAPITokenPath})
-        mkdir -p $(dirname ${top.caFile})
-
         # If there's a cfssl (cert issuer) running locally, then don't rely on user to
         # manually paste it in place. Just symlink.
         # otherwise, create the target file, ready for users to insert the token
@@ -217,18 +186,15 @@ in
         fi
       ''
       (optionalString (cfg.pkiTrustOnBootstrap) ''
-        if [ ! -s "${top.caFile}" ]; then
-          until test -s ${top.caFile}.json; do
-            sleep 2
-            curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o ${top.caFile}.json
-          done
-          cfssljson -f ${top.caFile}.json -stdout >${top.caFile}
-          rm ${top.caFile}.json
+        if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then
+          ${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \
+            ${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile}
         fi
       '')
       ];
       serviceConfig = {
-        TimeoutSec = "500";
+        RestartSec = "10s";
+        Restart = "on-failure";
       };
     };
 
@@ -264,28 +230,35 @@ in
           mapAttrs mkSpec cfg.certs;
       };
 
-      systemd.services.certmgr = {
-        wantedBy = [ "cfssl-online.target" ];
-        after = [ "cfssl-online.target" "kube-certmgr-bootstrap.service" ];
-        preStart = ''
-          while ! test -s ${certmgrAPITokenPath} ; do
-            sleep 1
-            echo Waiting for ${certmgrAPITokenPath}
-          done
-        '';
-        unitConfig.ConditionPathExists = certmgrPaths;
-      };
-
-      systemd.paths.certmgr = {
-        wantedBy = [ "certmgr.service" ];
-        pathConfig = {
-          PathExists = certmgrPaths;
-          PathChanged = certmgrPaths;
-        };
-      };
-
-      environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (cfg.etcClusterAdminKubeconfig != null)
-        (top.lib.mkKubeConfig "cluster-admin" clusterAdminKubeconfig);
+      #TODO: Get rid of kube-addon-manager in the future for the following reasons
+      # - it is basically just a shell script wrapped around kubectl
+      # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
+      # - it is designed to be used with k8s system components only
+      # - it would be better with a more Nix-oriented way of managing addons
+      systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{
+        environment.KUBECONFIG = with cfg.certs.addonManager;
+          top.lib.mkKubeConfig "addon-manager" {
+            server = top.apiserverAddress;
+            certFile = cert;
+            keyFile = key;
+          };
+        }
+
+        (optionalAttrs (top.addonManager.bootstrapAddons != {}) {
+          serviceConfig.PermissionsStartOnly = true;
+          preStart = with pkgs;
+          let
+            files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
+              top.addonManager.bootstrapAddons;
+          in
+          ''
+            export KUBECONFIG=${clusterAdminKubeconfig}
+            ${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
+          '';
+        })]);
+
+      environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
+        clusterAdminKubeconfig;
 
       environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
       (pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
@@ -311,22 +284,38 @@ in
           exit 1
         fi
 
-        do_restart=$(test -s ${certmgrAPITokenPath} && echo -n y || echo -n n)
-
         echo $token > ${certmgrAPITokenPath}
         chmod 600 ${certmgrAPITokenPath}
 
-        if [ y = $do_restart ]; then
-          echo "Restarting certmgr..." >&1
-          systemctl restart certmgr
-        fi
+        echo "Restarting certmgr..." >&1
+        systemctl restart certmgr
+
+        echo "Waiting for certs to appear..." >&1
+
+        ${optionalString top.kubelet.enable ''
+          while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
+          echo "Restarting kubelet..." >&1
+          systemctl restart kubelet
+        ''}
+
+        ${optionalString top.proxy.enable ''
+          while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
+          echo "Restarting kube-proxy..." >&1
+          systemctl restart kube-proxy
+        ''}
 
-        echo "Node joined succesfully" >&1
+        ${optionalString top.flannel.enable ''
+          while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
+          echo "Restarting flannel..." >&1
+          systemctl restart flannel
+        ''}
+
+        echo "Node joined succesfully"
       '')];
 
       # isolate etcd on loopback at the master node
       # easyCerts doesn't support multimaster clusters anyway atm.
-      services.etcd = mkIf top.apiserver.enable (with cfg.certs.etcd; {
+      services.etcd = with cfg.certs.etcd; {
         listenClientUrls = ["https://127.0.0.1:2379"];
         listenPeerUrls = ["https://127.0.0.1:2380"];
         advertiseClientUrls = ["https://etcd.local:2379"];
@@ -335,11 +324,19 @@ in
         certFile = mkDefault cert;
         keyFile = mkDefault key;
         trustedCaFile = mkDefault caCert;
-      });
+      };
       networking.extraHosts = mkIf (config.services.etcd.enable) ''
         127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
       '';
 
+      services.flannel = with cfg.certs.flannelClient; {
+        kubeconfig = top.lib.mkKubeConfig "flannel" {
+          server = top.apiserverAddress;
+          certFile = cert;
+          keyFile = key;
+        };
+      };
+
       services.kubernetes = {
 
         apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
@@ -359,13 +356,6 @@ in
           proxyClientCertFile = mkDefault cfg.certs.apiserverProxyClient.cert;
           proxyClientKeyFile = mkDefault cfg.certs.apiserverProxyClient.key;
         });
-        addonManager = mkIf top.addonManager.enable {
-          kubeconfig = with cfg.certs.addonManager; {
-            certFile = mkDefault cert;
-            keyFile = mkDefault key;
-          };
-          bootstrapAddonsKubeconfig = clusterAdminKubeconfig;
-        };
         controllerManager = mkIf top.controllerManager.enable {
           serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
           rootCaFile = cfg.certs.controllerManagerClient.caCert;
@@ -374,12 +364,6 @@ in
             keyFile = mkDefault key;
           };
         };
-        flannel = mkIf top.flannel.enable {
-          kubeconfig = with cfg.certs.flannelClient; {
-            certFile = cert;
-            keyFile = key;
-          };
-        };
         scheduler = mkIf top.scheduler.enable {
           kubeconfig = with cfg.certs.schedulerClient; {
             certFile = mkDefault cert;
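
One consumer of the clusterAdminKubeconfig defined at the top of this file is the
etcClusterAdminKubeconfig option; a minimal sketch of exporting the generated admin
credentials under /etc (the relative path is an assumption):

    services.kubernetes.pki.etcClusterAdminKubeconfig = "kubernetes/cluster-admin.kubeconfig";

On worker nodes, the nixos-kubernetes-node-join script shown above reads the master's
apitoken.secret from stdin, installs it at the certmgr token path, and restarts
certmgr plus the dependent services.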
diff --git a/nixos/modules/services/cluster/kubernetes/proxy.nix b/nixos/modules/services/cluster/kubernetes/proxy.nix
index 23f4d97b7030..bd4bf04ea833 100644
--- a/nixos/modules/services/cluster/kubernetes/proxy.nix
+++ b/nixos/modules/services/cluster/kubernetes/proxy.nix
@@ -45,28 +45,12 @@ in
   };
 
   ###### implementation
-  config = let
-
-    proxyPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-
-  in mkIf cfg.enable {
-    systemd.services.kube-proxy = rec {
+  config = mkIf cfg.enable {
+    systemd.services.kube-proxy = {
       description = "Kubernetes Proxy Service";
-      wantedBy = [ "kube-node-online.target" ];
-      after = [ "kubelet-online.service" ];
-      before = [ "kube-node-online.target" ];
-      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig;
-      path = with pkgs; [ iptables conntrack_tools kubectl ];
-      preStart = ''
-        until kubectl auth can-i get nodes/${top.kubelet.hostname} -q 2>/dev/null; do
-          echo kubectl auth can-i get nodes/${top.kubelet.hostname}: exit status $?
-          sleep 2
-        done
-      '';
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      path = with pkgs; [ iptables conntrack_tools ];
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = ''${top.package}/bin/kube-proxy \
@@ -75,7 +59,7 @@ in
             "--cluster-cidr=${top.clusterCidr}"} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${environment.KUBECONFIG} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
           ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
           ${cfg.extraOpts}
         '';
@@ -83,15 +67,6 @@ in
         Restart = "on-failure";
         RestartSec = 5;
       };
-      unitConfig.ConditionPathExists = proxyPaths;
-    };
-
-    systemd.paths.kube-proxy = {
-      wantedBy = [ "kube-proxy.service" ];
-      pathConfig = {
-        PathExists = proxyPaths;
-        PathChanged = proxyPaths;
-      };
     };
 
     services.kubernetes.pki.certs = {
diff --git a/nixos/modules/services/cluster/kubernetes/scheduler.nix b/nixos/modules/services/cluster/kubernetes/scheduler.nix
index a0e484542951..5f6113227d9d 100644
--- a/nixos/modules/services/cluster/kubernetes/scheduler.nix
+++ b/nixos/modules/services/cluster/kubernetes/scheduler.nix
@@ -56,35 +56,18 @@ in
   };
 
   ###### implementation
-  config =  let
-
-    schedulerPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-
-  in mkIf cfg.enable {
-    systemd.services.kube-scheduler = rec {
+  config = mkIf cfg.enable {
+    systemd.services.kube-scheduler = {
       description = "Kubernetes Scheduler Service";
-      wantedBy = [ "kube-control-plane-online.target" ];
+      wantedBy = [ "kubernetes.target" ];
       after = [ "kube-apiserver.service" ];
-      before = [ "kube-control-plane-online.target" ];
-      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig;
-      path = [ pkgs.kubectl ];
-      preStart = ''
-        until kubectl auth can-i get /api -q 2>/dev/null; do
-          echo kubectl auth can-i get /api: exit status $?
-          sleep 2
-        done
-      '';
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = ''${top.package}/bin/kube-scheduler \
           --address=${cfg.address} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${environment.KUBECONFIG} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
           --leader-elect=${boolToString cfg.leaderElect} \
           --port=${toString cfg.port} \
           ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
@@ -96,15 +79,6 @@ in
         Restart = "on-failure";
         RestartSec = 5;
       };
-      unitConfig.ConditionPathExists = schedulerPaths;
-    };
-
-    systemd.paths.kube-scheduler = {
-      wantedBy = [ "kube-scheduler.service" ];
-      pathConfig = {
-        PathExists = schedulerPaths;
-        PathChanged = schedulerPaths;
-      };
     };
 
     services.kubernetes.pki.certs = {
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index 10250bb5193a..1ed4d3290ced 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -81,6 +81,10 @@ in
         default = "";
         description = ''
           Defines the mapping from system users to database users.
+
+          The general form is:
+
+          map-name system-username database-username
         '';
       };
 
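Following the general form just added to the description, a hedged example of an
ident map (map and user names are hypothetical):

    services.postgresql.identMap = ''
      admin-map  root      postgres
      admin-map  postgres  postgres
    '';
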
diff --git a/nixos/modules/services/editors/emacs.xml b/nixos/modules/services/editors/emacs.xml
index 8ced302bad1e..03483f69fa2f 100644
--- a/nixos/modules/services/editors/emacs.xml
+++ b/nixos/modules/services/editors/emacs.xml
@@ -59,7 +59,7 @@
        <para>
         The latest stable version of Emacs 25 using the
         <link
-                xlink:href="http://www.gtk.org">GTK+ 2</link>
+                xlink:href="http://www.gtk.org">GTK 2</link>
         widget toolkit.
        </para>
       </listitem>
@@ -321,7 +321,7 @@ https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides
    <para>
     If you want, you can tweak the Emacs package itself from your
     <filename>emacs.nix</filename>. For example, if you want to have a
-    GTK+3-based Emacs instead of the default GTK+2-based binary and remove the
+    GTK 3-based Emacs instead of the default GTK 2-based binary and remove the
    automatically generated <filename>emacs.desktop</filename> (useful if you
     only use <command>emacsclient</command>), you can change your file
     <filename>emacs.nix</filename> in this way:
@@ -349,7 +349,7 @@ in [...]
 
    <para>
     After building this file as shown in <xref linkend="ex-emacsNix" />, you
+    will get a GTK 3-based Emacs binary pre-loaded with your favorite packages.
+    will get an GTK 3-based Emacs binary pre-loaded with your favorite packages.
    </para>
   </section>
  </section>
diff --git a/nixos/modules/services/misc/zookeeper.nix b/nixos/modules/services/misc/zookeeper.nix
index 50c84e3c6b80..5d91e44a199d 100644
--- a/nixos/modules/services/misc/zookeeper.nix
+++ b/nixos/modules/services/misc/zookeeper.nix
@@ -121,6 +121,7 @@ in {
 
     systemd.tmpfiles.rules = [
       "d '${cfg.dataDir}' 0700 zookeeper - - -"
+      "Z '${cfg.dataDir}' 0700 zookeeper - - -"
     ];
 
     systemd.services.zookeeper = {
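
The added Z rule complements the existing d rule: per tmpfiles.d semantics, d creates
the directory if it is missing, while Z recursively (re)applies the given owner and
mode to everything already beneath it, fixing up permissions of pre-existing data.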
diff --git a/nixos/modules/services/network-filesystems/ceph.nix b/nixos/modules/services/network-filesystems/ceph.nix
index 0191b0640f00..3dc5b8feef65 100644
--- a/nixos/modules/services/network-filesystems/ceph.nix
+++ b/nixos/modules/services/network-filesystems/ceph.nix
@@ -3,18 +3,18 @@
 with lib;
 
 let
-  ceph = pkgs.ceph;
   cfg  = config.services.ceph;
+
   # function that translates "camelCaseOptions" to "camel case options", credits to tilpner in #nixos@freenode
-  translateOption = replaceStrings upperChars (map (s: " ${s}") lowerChars);
-  generateDaemonList = (daemonType: daemons: extraServiceConfig:
-    mkMerge (
-      map (daemon:
-        { "ceph-${daemonType}-${daemon}" = generateServiceFile daemonType daemon cfg.global.clusterName ceph extraServiceConfig; }
-      ) daemons
-    )
-  );
-  generateServiceFile = (daemonType: daemonId: clusterName: ceph: extraServiceConfig: {
+  expandCamelCase = replaceStrings upperChars (map (s: " ${s}") lowerChars);
+  expandCamelCaseAttrs = mapAttrs' (name: value: nameValuePair (expandCamelCase name) value);
+
+  makeServices = (daemonType: daemonIds: extraServiceConfig:
+    mkMerge (map (daemonId:
+      { "ceph-${daemonType}-${daemonId}" = makeService daemonType daemonId cfg.global.clusterName pkgs.ceph extraServiceConfig; })
+      daemonIds));
+
+  makeService = (daemonType: daemonId: clusterName: ceph: extraServiceConfig: {
     enable = true;
     description = "Ceph ${builtins.replaceStrings lowerChars upperChars daemonType} daemon ${daemonId}";
     after = [ "network-online.target" "time-sync.target" ] ++ optional (daemonType == "osd") "ceph-mon.target";
@@ -34,23 +34,29 @@ let
       Restart = "on-failure";
       StartLimitBurst = "5";
       StartLimitInterval = "30min";
-      ExecStart = "${ceph.out}/bin/${if daemonType == "rgw" then "radosgw" else "ceph-${daemonType}"} -f --cluster ${clusterName} --id ${if daemonType == "rgw" then "client.${daemonId}" else daemonId} --setuser ceph --setgroup ceph";
+      ExecStart = ''${ceph.out}/bin/${if daemonType == "rgw" then "radosgw" else "ceph-${daemonType}"} \
+                    -f --cluster ${clusterName} --id ${daemonId} --setuser ceph \
+                    --setgroup ${if daemonType == "osd" then "disk" else "ceph"}'';
     } // extraServiceConfig
-      // optionalAttrs (daemonType == "osd") { ExecStartPre = "${ceph.out}/libexec/ceph/ceph-osd-prestart.sh --id ${daemonId} --cluster ${clusterName}"; };
-    } // optionalAttrs (builtins.elem daemonType [ "mds" "mon" "rgw" "mgr" ]) { preStart = ''
+      // optionalAttrs (daemonType == "osd") { ExecStartPre = ''${ceph.lib}/libexec/ceph/ceph-osd-prestart.sh \
+                                                              --id ${daemonId} --cluster ${clusterName}''; };
+    } // optionalAttrs (builtins.elem daemonType [ "mds" "mon" "rgw" "mgr" ]) {
+      preStart = ''
         daemonPath="/var/lib/ceph/${if daemonType == "rgw" then "radosgw" else daemonType}/${clusterName}-${daemonId}"
-        if [ ! -d ''$daemonPath ]; then
-          mkdir -m 755 -p ''$daemonPath
-          chown -R ceph:ceph ''$daemonPath
+        if [ ! -d $daemonPath ]; then
+          mkdir -m 755 -p $daemonPath
+          chown -R ceph:ceph $daemonPath
         fi
       '';
     } // optionalAttrs (daemonType == "osd") { path = [ pkgs.getopt ]; }
   );
-  generateTargetFile = (daemonType:
+
+  makeTarget = (daemonType:
     {
       "ceph-${daemonType}" = {
         description = "Ceph target allowing to start/stop all ceph-${daemonType} services at once";
         partOf = [ "ceph.target" ];
+        wantedBy = [ "ceph.target" ];
         before = [ "ceph.target" ];
       };
     }
@@ -82,6 +88,14 @@ in
         '';
       };
 
+      mgrModulePath = mkOption {
+        type = types.path;
+        default = "${pkgs.ceph.lib}/lib/ceph/mgr";
+        description = ''
+          Path at which to find ceph-mgr modules.
+        '';
+      };
+
       monInitialMembers = mkOption {
         type = with types; nullOr commas;
         default = null;
@@ -157,6 +171,27 @@ in
           A comma-separated list of subnets that will be used as cluster networks in the cluster.
         '';
       };
+
+      rgwMimeTypesFile = mkOption {
+        type = with types; nullOr path;
+        default = "${pkgs.mime-types}/etc/mime.types";
+        description = ''
+          Path to mime types used by radosgw.
+        '';
+      };
+    };
+
+    extraConfig = mkOption {
+      type = with types; attrsOf str;
+      default = {};
+      example = ''
+        {
+          "ms bind ipv6" = "true";
+        };
+      '';
+      description = ''
+        Extra configuration to add to the global section. Use for setting values that are common for all daemons in the cluster.
+      '';
     };
 
     mgr = {
@@ -216,6 +251,7 @@ in
           to the id part in ceph i.e. [ "name1" ] would result in osd.name1
         '';
       };
+
       extraConfig = mkOption {
         type = with types; attrsOf str;
         default = {
@@ -296,9 +332,6 @@ in
       { assertion = cfg.global.fsid != "";
         message = "fsid has to be set to a valid uuid for the cluster to function";
       }
-      { assertion = cfg.mgr.enable == true;
-        message = "ceph 12.x requires atleast 1 MGR daemon enabled for the cluster to function";
-      }
       { assertion = cfg.mon.enable == true -> cfg.mon.daemons != [];
         message = "have to set id of atleast one MON if you're going to enable Monitor";
       }
@@ -317,14 +350,12 @@ in
       ''Not setting up a list of members in monInitialMembers requires that you set the host variable for each mon daemon or else the cluster won't function'';
 
     environment.etc."ceph/ceph.conf".text = let
-      # Translate camelCaseOptions to the expected camel case option for ceph.conf
-      translatedGlobalConfig = mapAttrs' (name: value: nameValuePair (translateOption name) value) cfg.global;
       # Merge the extraConfig set for mgr daemons, as mgr don't have their own section
-      globalAndMgrConfig = translatedGlobalConfig // optionalAttrs cfg.mgr.enable cfg.mgr.extraConfig;
+      globalSection = expandCamelCaseAttrs (cfg.global // cfg.extraConfig // optionalAttrs cfg.mgr.enable cfg.mgr.extraConfig);
       # Remove all name-value pairs with null values from the attribute set to avoid making empty sections in the ceph.conf
-      globalConfig = mapAttrs' (name: value: nameValuePair (translateOption name) value) (filterAttrs (name: value: value != null) globalAndMgrConfig);
+      globalSection' = filterAttrs (name: value: value != null) globalSection;
       totalConfig = {
-          global = globalConfig;
+          global = globalSection';
         } // optionalAttrs (cfg.mon.enable && cfg.mon.extraConfig != {}) { mon = cfg.mon.extraConfig; }
           // optionalAttrs (cfg.mds.enable && cfg.mds.extraConfig != {}) { mds = cfg.mds.extraConfig; }
           // optionalAttrs (cfg.osd.enable && cfg.osd.extraConfig != {}) { osd = cfg.osd.extraConfig; }
@@ -336,8 +367,9 @@ in
       name = "ceph";
       uid = config.ids.uids.ceph;
       description = "Ceph daemon user";
+      group = "ceph";
+      extraGroups = [ "disk" ];
     };
-
     users.groups = singleton {
       name = "ceph";
       gid = config.ids.gids.ceph;
@@ -345,22 +377,26 @@ in
 
     systemd.services = let
       services = []
-        ++ optional cfg.mon.enable (generateDaemonList "mon" cfg.mon.daemons { RestartSec = "10"; })
-        ++ optional cfg.mds.enable (generateDaemonList "mds" cfg.mds.daemons { StartLimitBurst = "3"; })
-        ++ optional cfg.osd.enable (generateDaemonList "osd" cfg.osd.daemons { StartLimitBurst = "30"; RestartSec = "20s"; })
-        ++ optional cfg.rgw.enable (generateDaemonList "rgw" cfg.rgw.daemons { })
-        ++ optional cfg.mgr.enable (generateDaemonList "mgr" cfg.mgr.daemons { StartLimitBurst = "3"; });
+        ++ optional cfg.mon.enable (makeServices "mon" cfg.mon.daemons { RestartSec = "10"; })
+        ++ optional cfg.mds.enable (makeServices "mds" cfg.mds.daemons { StartLimitBurst = "3"; })
+        ++ optional cfg.osd.enable (makeServices "osd" cfg.osd.daemons { StartLimitBurst = "30";
+                                                                         RestartSec = "20s";
+                                                                         PrivateDevices = "no"; # osd needs disk access
+                                                                       })
+        ++ optional cfg.rgw.enable (makeServices "rgw" cfg.rgw.daemons { })
+        ++ optional cfg.mgr.enable (makeServices "mgr" cfg.mgr.daemons { StartLimitBurst = "3"; });
       in
         mkMerge services;
 
     systemd.targets = let
       targets = [
-        { ceph = { description = "Ceph target allowing to start/stop all ceph service instances at once"; }; }
-      ] ++ optional cfg.mon.enable (generateTargetFile "mon")
-        ++ optional cfg.mds.enable (generateTargetFile "mds")
-        ++ optional cfg.osd.enable (generateTargetFile "osd")
-        ++ optional cfg.rgw.enable (generateTargetFile "rgw")
-        ++ optional cfg.mgr.enable (generateTargetFile "mgr");
+        { "ceph" = { description = "Ceph target allowing to start/stop all ceph service instances at once";
+                     wantedBy = [ "multi-user.target" ]; }; }
+      ] ++ optional cfg.mon.enable (makeTarget "mon")
+        ++ optional cfg.mds.enable (makeTarget "mds")
+        ++ optional cfg.osd.enable (makeTarget "osd")
+        ++ optional cfg.rgw.enable (makeTarget "rgw")
+        ++ optional cfg.mgr.enable (makeTarget "mgr");
       in
         mkMerge targets;
 
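A minimal evaluable sketch of the renamed helper, matching its definition at the top
of this file; this is how camelCase NixOS option names are mapped onto the
space-separated keys ceph.conf expects:

    let
      lib = (import <nixpkgs> {}).lib;
      expandCamelCase = lib.replaceStrings lib.upperChars (map (s: " ${s}") lib.lowerChars);
    in
      expandCamelCase "monInitialMembers"   # evaluates to "mon initial members"
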
diff --git a/nixos/modules/services/web-servers/darkhttpd.nix b/nixos/modules/services/web-servers/darkhttpd.nix
index 80870118c334..d6649fd472d9 100644
--- a/nixos/modules/services/web-servers/darkhttpd.nix
+++ b/nixos/modules/services/web-servers/darkhttpd.nix
@@ -67,7 +67,7 @@ in {
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         DynamicUser = true;
-        ExecStart = "${cfg.package}/bin/darkhttpd ${args}";
+        ExecStart = "${pkgs.darkhttpd}/bin/darkhttpd ${args}";
         AmbientCapabilities = lib.mkIf (cfg.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
         Restart = "on-failure";
         RestartSec = "2s";
diff --git a/nixos/modules/services/x11/desktop-managers/enlightenment.nix b/nixos/modules/services/x11/desktop-managers/enlightenment.nix
index 527e4b18045b..9914b6687090 100644
--- a/nixos/modules/services/x11/desktop-managers/enlightenment.nix
+++ b/nixos/modules/services/x11/desktop-managers/enlightenment.nix
@@ -31,7 +31,7 @@ in
       e.efl e.enlightenment
       e.terminology e.econnman
       pkgs.xorg.xauth # used by kdesu
-      pkgs.gtk2 # To get GTK+'s themes.
+      pkgs.gtk2 # To get GTK's themes.
       pkgs.tango-icon-theme
 
       pkgs.gnome2.gnome_icon_theme
@@ -48,7 +48,7 @@ in
     services.xserver.desktopManager.session = [
     { name = "Enlightenment";
       start = ''
-        # Set GTK_DATA_PREFIX so that GTK+ can find the themes
+        # Set GTK_DATA_PREFIX so that GTK can find the themes
         export GTK_DATA_PREFIX=${config.system.path}
         # find theme engines
         export GTK_PATH=${config.system.path}/lib/gtk-3.0:${config.system.path}/lib/gtk-2.0
diff --git a/nixos/modules/services/x11/desktop-managers/mate.nix b/nixos/modules/services/x11/desktop-managers/mate.nix
index 6a2aa650c0b2..a9ca945fc669 100644
--- a/nixos/modules/services/x11/desktop-managers/mate.nix
+++ b/nixos/modules/services/x11/desktop-managers/mate.nix
@@ -48,7 +48,7 @@ in
       name = "mate";
       bgSupport = true;
       start = ''
-        # Set GTK_DATA_PREFIX so that GTK+ can find the themes
+        # Set GTK_DATA_PREFIX so that GTK can find the themes
         export GTK_DATA_PREFIX=${config.system.path}
 
         # Find theme engines
diff --git a/nixos/modules/services/x11/desktop-managers/xfce.nix b/nixos/modules/services/x11/desktop-managers/xfce.nix
index 1102f73d1ac3..e3249aef50c7 100644
--- a/nixos/modules/services/x11/desktop-managers/xfce.nix
+++ b/nixos/modules/services/x11/desktop-managers/xfce.nix
@@ -48,7 +48,7 @@ in
 
   config = mkIf cfg.enable {
     environment.systemPackages = with pkgs.xfce // pkgs; [
-      # Get GTK+ themes and gtk-update-icon-cache
+      # Get GTK themes and gtk-update-icon-cache
       gtk2.out
 
       # Supplies some abstract icons such as:
@@ -107,10 +107,10 @@ in
       start = ''
         ${cfg.extraSessionCommands}
 
-        # Set GTK_PATH so that GTK+ can find the theme engines.
+        # Set GTK_PATH so that GTK can find the theme engines.
         export GTK_PATH="${config.system.path}/lib/gtk-2.0:${config.system.path}/lib/gtk-3.0"
 
-        # Set GTK_DATA_PREFIX so that GTK+ can find the Xfce themes.
+        # Set GTK_DATA_PREFIX so that GTK can find the Xfce themes.
         export GTK_DATA_PREFIX=${config.system.path}
 
         ${pkgs.runtimeShell} ${pkgs.xfce.xinitrc} &
diff --git a/nixos/modules/services/x11/desktop-managers/xfce4-14.nix b/nixos/modules/services/x11/desktop-managers/xfce4-14.nix
index 16329c093f98..55c88223e788 100644
--- a/nixos/modules/services/x11/desktop-managers/xfce4-14.nix
+++ b/nixos/modules/services/x11/desktop-managers/xfce4-14.nix
@@ -114,10 +114,10 @@ in
       name = "xfce4-14";
       bgSupport = true;
       start = ''
-        # Set GTK_PATH so that GTK+ can find the theme engines.
+        # Set GTK_PATH so that GTK can find the theme engines.
         export GTK_PATH="${config.system.path}/lib/gtk-2.0:${config.system.path}/lib/gtk-3.0"
 
-        # Set GTK_DATA_PREFIX so that GTK+ can find the Xfce themes.
+        # Set GTK_DATA_PREFIX so that GTK can find the Xfce themes.
         export GTK_DATA_PREFIX=${config.system.path}
 
         ${pkgs.runtimeShell} ${pkgs.xfce4-14.xinitrc} &
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index 0c4ad90b4eb6..aadfc5add350 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -25,6 +25,9 @@ in
       { assertion = cfg.hvm;
         message = "Paravirtualized EC2 instances are no longer supported.";
       }
+      { assertion = cfg.efi -> cfg.hvm;
+        message = "EC2 instances using EFI must be HVM instances.";
+      }
     ];
 
     boot.growPartition = cfg.hvm;
@@ -35,6 +38,11 @@ in
       autoResize = true;
     };
 
+    fileSystems."/boot" = mkIf cfg.efi {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
     boot.extraModulePackages = [
       config.boot.kernelPackages.ena
     ];
@@ -50,8 +58,10 @@ in
 
     # Generate a GRUB menu.  Amazon's pv-grub uses this to boot our kernel/initrd.
     boot.loader.grub.version = if cfg.hvm then 2 else 1;
-    boot.loader.grub.device = if cfg.hvm then "/dev/xvda" else "nodev";
+    boot.loader.grub.device = if (cfg.hvm && !cfg.efi) then "/dev/xvda" else "nodev";
     boot.loader.grub.extraPerEntryConfig = mkIf (!cfg.hvm) "root (hd0)";
+    boot.loader.grub.efiSupport = cfg.efi;
+    boot.loader.grub.efiInstallAsRemovable = cfg.efi;
     boot.loader.timeout = 0;
 
     boot.initrd.network.enable = true;
@@ -137,7 +147,7 @@ in
     networking.timeServers = [ "169.254.169.123" ];
 
     # udisks has become too bloated to have in a headless system
-    # (e.g. it depends on GTK+).
+    # (e.g. it depends on GTK).
     services.udisks2.enable = false;
   };
 }
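To make the EFI branch concrete: on an instance with ec2.efi = true (which the new assertion forces to also be HVM), the settings above collapse to the following, derived directly from the hunks rather than added configuration:

    boot.loader.grub.version = 2;
    boot.loader.grub.device = "nodev";
    boot.loader.grub.efiSupport = true;
    boot.loader.grub.efiInstallAsRemovable = true;
    fileSystems."/boot" = {
      device = "/dev/disk/by-label/ESP";
      fsType = "vfat";
    };
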
diff --git a/nixos/modules/virtualisation/amazon-options.nix b/nixos/modules/virtualisation/amazon-options.nix
index 15de8638bbab..2e807131e938 100644
--- a/nixos/modules/virtualisation/amazon-options.nix
+++ b/nixos/modules/virtualisation/amazon-options.nix
@@ -1,4 +1,4 @@
-{ config, lib, ... }:
+{ config, lib, pkgs, ... }:
 {
   options = {
     ec2 = {
@@ -9,6 +9,13 @@
           Whether the EC2 instance is a HVM instance.
         '';
       };
+      efi = lib.mkOption {
+        default = pkgs.stdenv.hostPlatform.isAarch64;
+        internal = true;
+        description = ''
+          Whether the EC2 instance is using EFI.
+        '';
+      };
     };
   };
 }
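A short sketch of how the new option behaves: it defaults to true only when evaluating for aarch64 and, being internal, is normally left at its default; an image expression could still set it explicitly (hypothetical usage):

    { config, pkgs, ... }: {
      ec2.hvm = true;   # the assertion above enforces efi -> hvm
      ec2.efi = true;   # defaults to pkgs.stdenv.hostPlatform.isAarch64
    }
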
diff --git a/nixos/modules/virtualisation/railcar.nix b/nixos/modules/virtualisation/railcar.nix
new file mode 100644
index 000000000000..8b643e3b6d65
--- /dev/null
+++ b/nixos/modules/virtualisation/railcar.nix
@@ -0,0 +1,125 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.railcar;
+  generateUnit = name: containerConfig:
+    let
+      container = pkgs.ociTools.buildContainer {
+        args = [
+          (pkgs.writeShellScript "run.sh" containerConfig.cmd).outPath
+        ];
+      };
+    in
+      nameValuePair "railcar-${name}" {
+        enable = true;
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+            ExecStart = ''
+              ${cfg.package}/bin/railcar -r ${cfg.stateDir} run ${name} -b ${container}
+            '';
+            Type = containerConfig.runType;
+          };
+      };
+  mount = with types; (submodule {
+    options = {
+      type = mkOption {
+        type = string;
+        default = "none";
+        description = ''
+          The type of the filesystem to be mounted.
+          Linux: filesystem types supported by the kernel as listed in 
+          `/proc/filesystems` (e.g., "minix", "ext2", "ext3", "jfs", "xfs", 
+          "reiserfs", "msdos", "proc", "nfs", "iso9660"). For bind mounts 
+          (when options include either bind or rbind), the type is a dummy,
+          often "none" (not listed in /proc/filesystems).
+        '';
+      };
+      source = mkOption {
+        type = string;
+        description = "Source for the in-container mount";
+      };
+      options = mkOption {
+        type = listOf string;
+        default = [ "bind" ];
+        description = ''
+          Mount options of the filesystem to be used.
+        
+          Supported options are listed in the mount(8) man page. Note that
+          both filesystem-independent and filesystem-specific options 
+          are listed.
+        '';
+      };
+    };
+  });
+in
+{
+  options.services.railcar = {
+    enable = mkEnableOption "railcar";
+
+    containers = mkOption {
+      default = {};
+      description = "Declarative container configuration";
+      type = with types; loaOf (submodule ({ name, config, ... }: {
+        options = {
+          cmd = mkOption {
+            type = types.string;
+            description = "Command or script to run inside the container";
+          };
+
+          mounts = mkOption {
+            type = with types; attrsOf mount;
+            default = {};
+            description = ''
+              A set of mounts inside the container.
+
+              The defaults have been chosen for simple bind mounts, meaning
+              that you only need to provide the "source" parameter.
+            '';
+            example = ''
+              { "/data" = { source = "/var/lib/data"; }; }
+            '';
+          };
+
+          runType = mkOption {
+            type = types.string;
+            default = "oneshot";
+            description = "The systemd service run type";
+          };
+
+          os = mkOption {
+            type = types.string;
+            default = "linux";
+            description = "OS type of the container";
+          };
+
+          arch = mkOption {
+            type = types.string;
+            default = "x86_64";
+            description = "Computer architecture type of the container";
+          };
+        };
+      }));
+    };
+
+    stateDir = mkOption {
+      type = types.path;
+      default = "/var/railcar";
+      description = "Railcar persistent state directory";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.railcar;
+      description = "Railcar package to use";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services = flip mapAttrs' cfg.containers (name: containerConfig:
+      generateUnit name containerConfig
+    );
+  };
+}
+
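Finally, a minimal sketch of consuming the new railcar module from configuration.nix (the container name, command, and paths are illustrative only; every option used here appears in the new file above):

    services.railcar = {
      enable = true;
      containers.hello = {
        cmd = "echo hello from railcar";                # wrapped via writeShellScript
        mounts."/data" = { source = "/var/lib/data"; }; # bind mount by default
      };
    };
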