Diffstat (limited to 'nixos/modules')
-rw-r--r--  nixos/modules/config/networking.nix | 6
-rw-r--r--  nixos/modules/config/pulseaudio.nix | 8
-rw-r--r--  nixos/modules/config/system-path.nix | 4
-rw-r--r--  nixos/modules/hardware/opengl.nix | 4
-rw-r--r--  nixos/modules/hardware/video/amdgpu-pro.nix | 4
-rw-r--r--  nixos/modules/hardware/video/ati.nix | 4
-rw-r--r--  nixos/modules/hardware/video/nvidia.nix | 4
-rw-r--r--  nixos/modules/installer/virtualbox-demo.nix | 38
-rw-r--r--  nixos/modules/misc/ids.nix | 6
-rw-r--r--  nixos/modules/misc/nixpkgs.nix | 1
-rw-r--r--  nixos/modules/module-list.nix | 1
-rw-r--r--  nixos/modules/profiles/clone-config.nix | 9
-rw-r--r--  nixos/modules/profiles/installation-device.nix | 2
-rw-r--r--  nixos/modules/programs/bash/bash.nix | 2
-rw-r--r--  nixos/modules/programs/shell.nix | 4
-rw-r--r--  nixos/modules/programs/sway-beta.nix | 33
-rw-r--r--  nixos/modules/rename.nix | 5
-rw-r--r--  nixos/modules/security/apparmor-suid.nix | 2
-rw-r--r--  nixos/modules/security/dhparams.nix | 2
-rw-r--r--  nixos/modules/security/rngd.nix | 4
-rw-r--r--  nixos/modules/services/admin/salt/master.nix | 3
-rw-r--r--  nixos/modules/services/admin/salt/minion.nix | 21
-rw-r--r--  nixos/modules/services/cluster/kubernetes/dns.nix | 386
-rw-r--r--  nixos/modules/services/computing/slurm/slurm.nix | 113
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 12
-rw-r--r--  nixos/modules/services/databases/postgresql.xml | 8
-rw-r--r--  nixos/modules/services/development/jupyter/default.nix | 1
-rw-r--r--  nixos/modules/services/hardware/upower.nix | 26
-rw-r--r--  nixos/modules/services/mail/clamsmtp.nix | 2
-rw-r--r--  nixos/modules/services/mail/dkimproxy-out.nix | 2
-rw-r--r--  nixos/modules/services/mail/rspamd.nix | 80
-rw-r--r--  nixos/modules/services/misc/gitlab.nix | 221
-rw-r--r--  nixos/modules/services/misc/home-assistant.nix | 1
-rw-r--r--  nixos/modules/services/misc/packagekit.nix | 3
-rw-r--r--  nixos/modules/services/monitoring/kapacitor.nix | 154
-rw-r--r--  nixos/modules/services/monitoring/prometheus/default.nix | 29
-rw-r--r--  nixos/modules/services/networking/bitlbee.nix | 2
-rw-r--r--  nixos/modules/services/networking/chrony.nix | 2
-rw-r--r--  nixos/modules/services/networking/consul.nix | 7
-rw-r--r--  nixos/modules/services/networking/ntpd.nix | 2
-rw-r--r--  nixos/modules/services/networking/redsocks.nix | 2
-rw-r--r--  nixos/modules/services/networking/syncthing.nix | 14
-rw-r--r--  nixos/modules/services/networking/zerotierone.nix | 3
-rw-r--r--  nixos/modules/services/search/solr.nix | 181
-rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix | 35
-rw-r--r--  nixos/modules/services/web-servers/tomcat.nix | 27
-rw-r--r--  nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix | 2
-rw-r--r--  nixos/modules/system/activation/activation-script.nix | 3
-rw-r--r--  nixos/modules/system/boot/stage-1.nix | 2
-rw-r--r--  nixos/modules/system/boot/systemd-nspawn.nix | 1
-rw-r--r--  nixos/modules/system/boot/systemd.nix | 2
-rw-r--r--  nixos/modules/tasks/filesystems.nix | 8
-rw-r--r--  nixos/modules/virtualisation/amazon-image.nix | 2
-rw-r--r--  nixos/modules/virtualisation/containers.nix | 9
-rw-r--r--  nixos/modules/virtualisation/docker-preloader.nix | 135
-rw-r--r--  nixos/modules/virtualisation/google-compute-image.nix | 1
-rw-r--r--  nixos/modules/virtualisation/libvirtd.nix | 2
-rw-r--r--  nixos/modules/virtualisation/parallels-guest.nix | 4
-rw-r--r--  nixos/modules/virtualisation/qemu-vm.nix | 5
-rw-r--r--  nixos/modules/virtualisation/virtualbox-image.nix | 10
60 files changed, 1141 insertions, 525 deletions
diff --git a/nixos/modules/config/networking.nix b/nixos/modules/config/networking.nix
index 1eb6fb43604a..627cce67e97d 100644
--- a/nixos/modules/config/networking.nix
+++ b/nixos/modules/config/networking.nix
@@ -228,9 +228,6 @@ in
         # /etc/protocols: IP protocol numbers.
         "protocols".source  = pkgs.iana-etc + "/etc/protocols";
 
-        # /etc/rpc: RPC program numbers.
-        "rpc".source = pkgs.glibc.out + "/etc/rpc";
-
         # /etc/hosts: Hostname-to-IP mappings.
         "hosts".text = let
           oneToString = set: ip: ip + " " + concatStringsSep " " set.${ip};
@@ -268,6 +265,9 @@ in
         "resolv.conf".source = "${pkgs.systemd}/lib/systemd/resolv.conf";
       } // optionalAttrs (config.services.resolved.enable && dnsmasqResolve) {
         "dnsmasq-resolv.conf".source = "/run/systemd/resolve/resolv.conf";
+      } // optionalAttrs (pkgs.stdenv.hostPlatform.libc == "glibc") {
+        # /etc/rpc: RPC program numbers.
+        "rpc".source = pkgs.glibc.out + "/etc/rpc";
       };
 
       networking.proxy.envVars =
diff --git a/nixos/modules/config/pulseaudio.nix b/nixos/modules/config/pulseaudio.nix
index d4aa59506295..67f7105fe2fe 100644
--- a/nixos/modules/config/pulseaudio.nix
+++ b/nixos/modules/config/pulseaudio.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, pkgs_i686, ... }:
+{ config, lib, pkgs, ... }:
 
 with pkgs;
 with lib;
@@ -19,7 +19,7 @@ let
 
   # Forces 32bit pulseaudio and alsaPlugins to be built/supported for apps
   # using 32bit alsa on 64bit linux.
-  enable32BitAlsaPlugins = cfg.support32Bit && stdenv.isx86_64 && (pkgs_i686.alsaLib != null && pkgs_i686.libpulseaudio != null);
+  enable32BitAlsaPlugins = cfg.support32Bit && stdenv.isx86_64 && (pkgs.pkgsi686Linux.alsaLib != null && pkgs.pkgsi686Linux.libpulseaudio != null);
 
 
   myConfigFile =
@@ -63,7 +63,7 @@ let
     pcm_type.pulse {
       libs.native = ${pkgs.alsaPlugins}/lib/alsa-lib/libasound_module_pcm_pulse.so ;
       ${lib.optionalString enable32BitAlsaPlugins
-     "libs.32Bit = ${pkgs_i686.alsaPlugins}/lib/alsa-lib/libasound_module_pcm_pulse.so ;"}
+     "libs.32Bit = ${pkgs.pkgsi686Linux.alsaPlugins}/lib/alsa-lib/libasound_module_pcm_pulse.so ;"}
     }
     pcm.!default {
       type pulse
@@ -72,7 +72,7 @@ let
     ctl_type.pulse {
       libs.native = ${pkgs.alsaPlugins}/lib/alsa-lib/libasound_module_ctl_pulse.so ;
       ${lib.optionalString enable32BitAlsaPlugins
-     "libs.32Bit = ${pkgs_i686.alsaPlugins}/lib/alsa-lib/libasound_module_ctl_pulse.so ;"}
+     "libs.32Bit = ${pkgs.pkgsi686Linux.alsaPlugins}/lib/alsa-lib/libasound_module_ctl_pulse.so ;"}
     }
     ctl.!default {
       type pulse
diff --git a/nixos/modules/config/system-path.nix b/nixos/modules/config/system-path.nix
index c07e19bd03c4..1793dc628edf 100644
--- a/nixos/modules/config/system-path.nix
+++ b/nixos/modules/config/system-path.nix
@@ -19,7 +19,9 @@ let
       pkgs.diffutils
       pkgs.findutils
       pkgs.gawk
-      pkgs.glibc # for ldd, getent
+      pkgs.stdenv.cc.libc
+      pkgs.getent
+      pkgs.getconf
       pkgs.gnugrep
       pkgs.gnupatch
       pkgs.gnused
diff --git a/nixos/modules/hardware/opengl.nix b/nixos/modules/hardware/opengl.nix
index 46d06d71333a..48e0072e0892 100644
--- a/nixos/modules/hardware/opengl.nix
+++ b/nixos/modules/hardware/opengl.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, pkgs_i686, ... }:
+{ config, lib, pkgs, ... }:
 
 with lib;
 
@@ -148,7 +148,7 @@ in
       [ "/run/opengl-driver/share" ] ++ optional cfg.driSupport32Bit "/run/opengl-driver-32/share";
 
     hardware.opengl.package = mkDefault (makePackage pkgs);
-    hardware.opengl.package32 = mkDefault (makePackage pkgs_i686);
+    hardware.opengl.package32 = mkDefault (makePackage pkgs.pkgsi686Linux);
 
     boot.extraModulePackages = optional (elem "virtualbox" videoDrivers) kernelPackages.virtualboxGuestAdditions;
   };
diff --git a/nixos/modules/hardware/video/amdgpu-pro.nix b/nixos/modules/hardware/video/amdgpu-pro.nix
index 50af022b93c8..ab9e0c92020e 100644
--- a/nixos/modules/hardware/video/amdgpu-pro.nix
+++ b/nixos/modules/hardware/video/amdgpu-pro.nix
@@ -1,6 +1,6 @@
 # This module provides the proprietary AMDGPU-PRO drivers.
 
-{ config, lib, pkgs, pkgs_i686, ... }:
+{ config, lib, pkgs, ... }:
 
 with lib;
 
@@ -11,7 +11,7 @@ let
   enabled = elem "amdgpu-pro" drivers;
 
   package = config.boot.kernelPackages.amdgpu-pro;
-  package32 = pkgs_i686.linuxPackages.amdgpu-pro.override { libsOnly = true; kernel = null; };
+  package32 = pkgs.pkgsi686Linux.linuxPackages.amdgpu-pro.override { libsOnly = true; kernel = null; };
 
   opengl = config.hardware.opengl;
 
diff --git a/nixos/modules/hardware/video/ati.nix b/nixos/modules/hardware/video/ati.nix
index 2fa37af6ca58..6102919f0155 100644
--- a/nixos/modules/hardware/video/ati.nix
+++ b/nixos/modules/hardware/video/ati.nix
@@ -1,6 +1,6 @@
 # This module provides the proprietary ATI X11 / OpenGL drivers.
 
-{ config, lib, pkgs_i686, ... }:
+{ config, lib, pkgs, ... }:
 
 with lib;
 
@@ -24,7 +24,7 @@ in
       { name = "fglrx"; modules = [ ati_x11 ]; libPath = [ "${ati_x11}/lib" ]; };
 
     hardware.opengl.package = ati_x11;
-    hardware.opengl.package32 = pkgs_i686.linuxPackages.ati_drivers_x11.override { libsOnly = true; kernel = null; };
+    hardware.opengl.package32 = pkgs.pkgsi686Linux.linuxPackages.ati_drivers_x11.override { libsOnly = true; kernel = null; };
 
     environment.systemPackages = [ ati_x11 ];
 
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
index f8524ab99e8a..21e12395498c 100644
--- a/nixos/modules/hardware/video/nvidia.nix
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -1,6 +1,6 @@
 # This module provides the proprietary NVIDIA X11 / OpenGL drivers.
 
-{ stdenv, config, lib, pkgs, pkgs_i686, ... }:
+{ stdenv, config, lib, pkgs, ... }:
 
 with lib;
 
@@ -25,7 +25,7 @@ let
   nvidia_x11 = nvidiaForKernel config.boot.kernelPackages;
   nvidia_libs32 =
     if versionOlder nvidia_x11.version "391" then
-      ((nvidiaForKernel pkgs_i686.linuxPackages).override { libsOnly = true; kernel = null; }).out
+      ((nvidiaForKernel pkgs.pkgsi686Linux.linuxPackages).override { libsOnly = true; kernel = null; }).out
     else
       (nvidiaForKernel config.boot.kernelPackages).lib32;
 
diff --git a/nixos/modules/installer/virtualbox-demo.nix b/nixos/modules/installer/virtualbox-demo.nix
index 8ca3592f3800..2e1b4b3998b5 100644
--- a/nixos/modules/installer/virtualbox-demo.nix
+++ b/nixos/modules/installer/virtualbox-demo.nix
@@ -22,4 +22,42 @@ with lib;
 
   powerManagement.enable = false;
   system.stateVersion = mkDefault "18.03";
+
+  installer.cloneConfigExtra = ''
+  # Let demo build as a trusted user.
+  # nix.trustedUsers = [ "demo" ];
+
+  # Mount a VirtualBox shared folder.
+  # This is configurable in the VirtualBox menu at
+  # Machine / Settings / Shared Folders.
+  # fileSystems."/mnt" = {
+  #   fsType = "vboxsf";
+  #   device = "nameofdevicetomount";
+  #   options = [ "rw" ];
+  # };
+
+  # By default, the NixOS VirtualBox demo image includes SDDM and Plasma.
+  # If you prefer another desktop manager or display manager, you may want
+  # to disable the default.
+  # services.xserver.desktopManager.plasma5.enable = lib.mkForce false;
+  # services.xserver.displayManager.sddm.enable = lib.mkForce false;
+
+  # Enable GDM/GNOME by uncommenting above two lines and two lines below.
+  # services.xserver.displayManager.gdm.enable = true;
+  # services.xserver.desktopManager.gnome3.enable = true;
+
+  # Set your time zone.
+  # time.timeZone = "Europe/Amsterdam";
+
+  # List packages installed in system profile. To search, run:
+  # \$ nix search wget
+  # environment.systemPackages = with pkgs; [
+  #   wget vim
+  # ];
+
+  # Enable the OpenSSH daemon.
+  # services.openssh.enable = true;
+
+  system.stateVersion = mkDefault "18.03";
+  '';
 }
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index 5c30e512a1b3..6e7f0a007bc2 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -331,6 +331,9 @@
       zeronet = 304;
       lirc = 305;
       lidarr = 306;
+      slurm = 307;
+      kapacitor = 308;
+      solr = 309;
 
       # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
 
@@ -622,6 +625,9 @@
       zeronet = 304;
       lirc = 305;
       lidarr = 306;
+      slurm = 307;
+      kapacitor = 308;
+      solr = 309;
 
       # When adding a gid, make sure it doesn't match an existing
       # uid. Users and groups with the same name should have equal
diff --git a/nixos/modules/misc/nixpkgs.nix b/nixos/modules/misc/nixpkgs.nix
index 7f9833e184ab..1dfd2c3c6cf3 100644
--- a/nixos/modules/misc/nixpkgs.nix
+++ b/nixos/modules/misc/nixpkgs.nix
@@ -208,7 +208,6 @@ in
   config = {
     _module.args = {
       pkgs = cfg.pkgs;
-      pkgs_i686 = cfg.pkgs.pkgsi686Linux;
     };
   };
 }
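
With the pkgs_i686 module argument gone, any out-of-tree module that still takes pkgs_i686 has to be rewritten the same way as the modules above; a minimal sketch, where mesa stands in for whatever 32-bit package is actually needed:

  { config, lib, pkgs, ... }:   # pkgs_i686 is no longer passed to modules
  {
    # The 32-bit package set is now reached through pkgs itself:
    hardware.opengl.package32 = pkgs.pkgsi686Linux.mesa;   # illustrative package only
  }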
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 660644eade8d..37e90232da2a 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -433,6 +433,7 @@
   ./services/monitoring/hdaps.nix
   ./services/monitoring/heapster.nix
   ./services/monitoring/incron.nix
+  ./services/monitoring/kapacitor.nix
   ./services/monitoring/longview.nix
   ./services/monitoring/monit.nix
   ./services/monitoring/munin.nix
diff --git a/nixos/modules/profiles/clone-config.nix b/nixos/modules/profiles/clone-config.nix
index 99d4774584f1..3f669ba7d2e1 100644
--- a/nixos/modules/profiles/clone-config.nix
+++ b/nixos/modules/profiles/clone-config.nix
@@ -48,6 +48,8 @@ let
 
       {
         imports = [ ${toString config.installer.cloneConfigIncludes} ];
+
+        ${config.installer.cloneConfigExtra}
       }
     '';
 
@@ -73,6 +75,13 @@ in
       '';
     };
 
+    installer.cloneConfigExtra = mkOption {
+      default = "";
+      description = ''
+        Extra text to include in the cloned configuration.nix included in this
+        installer.
+      '';
+    };
   };
 
   config = {
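
The new installer.cloneConfigExtra text is spliced verbatim into the generated clone of configuration.nix, right after the imports list; a minimal sketch mirroring how the VirtualBox demo image uses it above:

  { ... }:
  {
    installer.cloneConfigExtra = ''
      # Copied as-is into the cloned configuration.nix:
      system.stateVersion = mkDefault "18.03";
    '';
  }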
diff --git a/nixos/modules/profiles/installation-device.nix b/nixos/modules/profiles/installation-device.nix
index 370db2b08452..580ea4a58e5b 100644
--- a/nixos/modules/profiles/installation-device.nix
+++ b/nixos/modules/profiles/installation-device.nix
@@ -63,7 +63,7 @@ with lib;
     # Tell the Nix evaluator to garbage collect more aggressively.
     # This is desirable in memory-constrained environments that don't
     # (yet) have swap set up.
-    environment.variables.GC_INITIAL_HEAP_SIZE = "100000";
+    environment.variables.GC_INITIAL_HEAP_SIZE = "1M";
 
     # Make the installer more likely to succeed in low memory
     # environments.  The kernel's overcommit heustistics bite us
diff --git a/nixos/modules/programs/bash/bash.nix b/nixos/modules/programs/bash/bash.nix
index 0fbc77ea44cf..d325fff6a572 100644
--- a/nixos/modules/programs/bash/bash.nix
+++ b/nixos/modules/programs/bash/bash.nix
@@ -16,7 +16,7 @@ let
     # programmable completion. If we do, enable all modules installed in
     # the system and user profile in obsolete /etc/bash_completion.d/
     # directories. Bash loads completions in all
-    # $XDG_DATA_DIRS/share/bash-completion/completions/
+    # $XDG_DATA_DIRS/bash-completion/completions/
     # on demand, so they do not need to be sourced here.
     if shopt -q progcomp &>/dev/null; then
       . "${pkgs.bash-completion}/etc/profile.d/bash_completion.sh"
diff --git a/nixos/modules/programs/shell.nix b/nixos/modules/programs/shell.nix
index 6aa0262e3a4c..9842e2bef643 100644
--- a/nixos/modules/programs/shell.nix
+++ b/nixos/modules/programs/shell.nix
@@ -13,7 +13,7 @@ with lib;
         # Set up the per-user profile.
         mkdir -m 0755 -p "$NIX_USER_PROFILE_DIR"
         if [ "$(stat --printf '%u' "$NIX_USER_PROFILE_DIR")" != "$(id -u)" ]; then
-            echo "WARNING: bad ownership on $NIX_USER_PROFILE_DIR, should be $(id -u)" >&2
+            echo "WARNING: the per-user profile dir $NIX_USER_PROFILE_DIR should belong to user id $(id -u)" >&2
         fi
 
         if [ -w "$HOME" ]; then
@@ -35,7 +35,7 @@ with lib;
           NIX_USER_GCROOTS_DIR="/nix/var/nix/gcroots/per-user/$USER"
           mkdir -m 0755 -p "$NIX_USER_GCROOTS_DIR"
           if [ "$(stat --printf '%u' "$NIX_USER_GCROOTS_DIR")" != "$(id -u)" ]; then
-              echo "WARNING: bad ownership on $NIX_USER_GCROOTS_DIR, should be $(id -u)" >&2
+              echo "WARNING: the per-user gcroots dir $NIX_USER_GCROOTS_DIR should belong to user id $(id -u)" >&2
           fi
 
           # Set up a default Nix expression from which to install stuff.
diff --git a/nixos/modules/programs/sway-beta.nix b/nixos/modules/programs/sway-beta.nix
index 04f2e0662b86..e651ea4cca33 100644
--- a/nixos/modules/programs/sway-beta.nix
+++ b/nixos/modules/programs/sway-beta.nix
@@ -5,6 +5,15 @@ with lib;
 let
   cfg = config.programs.sway-beta;
   swayPackage = cfg.package;
+
+  swayWrapped = pkgs.writeShellScriptBin "sway" ''
+    ${cfg.extraSessionCommands}
+    exec ${pkgs.dbus.dbus-launch} --exit-with-session ${swayPackage}/bin/sway
+  '';
+  swayJoined = pkgs.symlinkJoin {
+    name = "sway-joined";
+    paths = [ swayWrapped swayPackage ];
+  };
 in {
   options.programs.sway-beta = {
     enable = mkEnableOption ''
@@ -20,13 +29,30 @@ in {
       '';
     };
 
+    extraSessionCommands = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        export SDL_VIDEODRIVER=wayland
+        # needs qt5.qtwayland in systemPackages
+        export QT_QPA_PLATFORM=wayland
+        export QT_WAYLAND_DISABLE_WINDOWDECORATION="1"
+        # Fix for some Java AWT applications (e.g. Android Studio),
+        # use this if they aren't displayed properly:
+        export _JAVA_AWT_WM_NONREPARENTING=1
+      '';
+      description = ''
+        Shell commands executed just before Sway is started.
+      '';
+    };
+
     extraPackages = mkOption {
       type = with types; listOf package;
       default = with pkgs; [
-        xwayland dmenu
+        xwayland rxvt_unicode dmenu
       ];
       defaultText = literalExample ''
-        with pkgs; [ xwayland dmenu ];
+        with pkgs; [ xwayland rxvt_unicode dmenu ];
       '';
       example = literalExample ''
         with pkgs; [
@@ -42,7 +68,7 @@ in {
   };
 
   config = mkIf cfg.enable {
-    environment.systemPackages = [ swayPackage ] ++ cfg.extraPackages;
+    environment.systemPackages = [ swayJoined ] ++ cfg.extraPackages;
     security.pam.services.swaylock = {};
     hardware.opengl.enable = mkDefault true;
     fonts.enableDefaultFonts = mkDefault true;
@@ -51,4 +77,3 @@ in {
 
   meta.maintainers = with lib.maintainers; [ gnidorah primeos colemickens ];
 }
-
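Because sway is now started through a wrapper script, extraSessionCommands run in the same shell just before dbus-launch starts the compositor; a minimal usage sketch based on the option's own example (qt5.qtwayland is only needed for the Qt variables):

  { pkgs, ... }:
  {
    programs.sway-beta.enable = true;
    programs.sway-beta.extraSessionCommands = ''
      export SDL_VIDEODRIVER=wayland
      export QT_QPA_PLATFORM=wayland
    '';
    programs.sway-beta.extraPackages = with pkgs; [ xwayland rxvt_unicode dmenu qt5.qtwayland ];
  }
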
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index eb74b9bcac12..aa2b5c0b2dfb 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -28,7 +28,10 @@ with lib;
       (config:
         let enabled = getAttrFromPath [ "services" "printing" "gutenprint" ] config;
         in if enabled then [ pkgs.gutenprint ] else [ ]))
-    (mkRenamedOptionModule [ "services" "ddclient" "domain" ] [ "services" "ddclient" "domains" ])
+    (mkChangedOptionModule [ "services" "ddclient" "domain" ] [ "services" "ddclient" "domains" ]
+      (config:
+        let value = getAttrFromPath [ "services" "ddclient" "domain" ] config;
+        in if value != "" then [ value ] else []))
     (mkRemovedOptionModule [ "services" "ddclient" "homeDir" ] "")
     (mkRenamedOptionModule [ "services" "elasticsearch" "host" ] [ "services" "elasticsearch" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "graphite" "api" "host" ] [ "services" "graphite" "api" "listenAddress" ])
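
For ddclient the old single-domain string is now converted into a one-element list, so an existing configuration translates as in this sketch (example.org is a placeholder):

  {
    # old: services.ddclient.domain = "example.org";
    services.ddclient.domains = [ "example.org" ];
  }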
diff --git a/nixos/modules/security/apparmor-suid.nix b/nixos/modules/security/apparmor-suid.nix
index dfbf5d859ba9..498c2f25d1c0 100644
--- a/nixos/modules/security/apparmor-suid.nix
+++ b/nixos/modules/security/apparmor-suid.nix
@@ -28,7 +28,7 @@ with lib;
         capability setuid,
         network inet raw,
 
-        ${pkgs.glibc.out}/lib/*.so mr,
+        ${pkgs.stdenv.cc.libc.out}/lib/*.so mr,
         ${pkgs.libcap.lib}/lib/libcap.so* mr,
         ${pkgs.attr.out}/lib/libattr.so* mr,
 
diff --git a/nixos/modules/security/dhparams.nix b/nixos/modules/security/dhparams.nix
index e2b84c3e3b38..62a499ea624d 100644
--- a/nixos/modules/security/dhparams.nix
+++ b/nixos/modules/security/dhparams.nix
@@ -170,4 +170,6 @@ in {
       '';
     }) cfg.params;
   };
+
+  meta.maintainers = with lib.maintainers; [ ekleog ];
 }
diff --git a/nixos/modules/security/rngd.nix b/nixos/modules/security/rngd.nix
index 81e04a44b115..a54ef2e6fcad 100644
--- a/nixos/modules/security/rngd.nix
+++ b/nixos/modules/security/rngd.nix
@@ -20,7 +20,6 @@ with lib;
       KERNEL=="random", TAG+="systemd"
       SUBSYSTEM=="cpu", ENV{MODALIAS}=="cpu:type:x86,*feature:*009E*", TAG+="systemd", ENV{SYSTEMD_WANTS}+="rngd.service"
       KERNEL=="hw_random", TAG+="systemd", ENV{SYSTEMD_WANTS}+="rngd.service"
-      ${if config.services.tcsd.enable then "" else ''KERNEL=="tpm0", TAG+="systemd", ENV{SYSTEMD_WANTS}+="rngd.service"''}
     '';
 
     systemd.services.rngd = {
@@ -30,8 +29,7 @@ with lib;
 
       description = "Hardware RNG Entropy Gatherer Daemon";
 
-      serviceConfig.ExecStart = "${pkgs.rng-tools}/sbin/rngd -f -v" +
-        (if config.services.tcsd.enable then " --no-tpm=1" else "");
+      serviceConfig.ExecStart = "${pkgs.rng-tools}/sbin/rngd -f";
     };
   };
 }
diff --git a/nixos/modules/services/admin/salt/master.nix b/nixos/modules/services/admin/salt/master.nix
index 165580b97837..c6b1b0cc0bd8 100644
--- a/nixos/modules/services/admin/salt/master.nix
+++ b/nixos/modules/services/admin/salt/master.nix
@@ -53,6 +53,9 @@ in
         Type = "notify";
         NotifyAccess = "all";
       };
+      restartTriggers = [
+        config.environment.etc."salt/master".source
+      ];
     };
   };
 
diff --git a/nixos/modules/services/admin/salt/minion.nix b/nixos/modules/services/admin/salt/minion.nix
index 9ecefb32cfa8..c8fa9461a209 100644
--- a/nixos/modules/services/admin/salt/minion.nix
+++ b/nixos/modules/services/admin/salt/minion.nix
@@ -15,7 +15,6 @@ let
     # Default is in /etc/salt/pki/minion
     pki_dir = "/var/lib/salt/pki/minion";
   } cfg.configuration;
-  configDir = pkgs.writeTextDir "minion" (builtins.toJSON fullConfig);
 
 in
 
@@ -28,15 +27,24 @@ in
         default = {};
         description = ''
           Salt minion configuration as Nix attribute set.
-          See <link xlink:href="https://docs.saltstack.com/en/latest/ref/configuration/minion.html"/>                                                                                                 
-          for details.          
+          See <link xlink:href="https://docs.saltstack.com/en/latest/ref/configuration/minion.html"/>
+          for details.
         '';
       };
     };
   };
 
   config = mkIf cfg.enable {
-    environment.systemPackages = with pkgs; [ salt ];
+    environment = {
+      # Set this up in /etc/salt/minion so `salt-call`, etc. work.
+      # The alternatives are
+      # - passing --config-dir to all salt commands, not just the minion unit,
+      # - setting a global environment variable.
+      etc."salt/minion".source = pkgs.writeText "minion" (
+        builtins.toJSON fullConfig
+      );
+      systemPackages = with pkgs; [ salt ];
+    };
     systemd.services.salt-minion = {
       description = "Salt Minion";
       wantedBy = [ "multi-user.target" ];
@@ -45,11 +53,14 @@ in
         utillinux
       ];
       serviceConfig = {
-        ExecStart = "${pkgs.salt}/bin/salt-minion --config-dir=${configDir}";
+        ExecStart = "${pkgs.salt}/bin/salt-minion";
         LimitNOFILE = 8192;
         Type = "notify";
         NotifyAccess = "all";
       };
+      restartTriggers = [
+        config.environment.etc."salt/minion".source
+      ];
     };
   };
 }
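
Since the minion configuration now lives in /etc/salt/minion, salt-call and related tools pick it up without --config-dir; a minimal sketch, where the master address is only a placeholder:

  {
    services.salt.minion = {
      enable = true;
      # Rendered as JSON (a subset of YAML) into /etc/salt/minion;
      # pki_dir already defaults to /var/lib/salt/pki/minion.
      configuration.master = "salt.example.org";
    };
  }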
diff --git a/nixos/modules/services/cluster/kubernetes/dns.nix b/nixos/modules/services/cluster/kubernetes/dns.nix
index 43bbb50a48d4..5a3e281ea694 100644
--- a/nixos/modules/services/cluster/kubernetes/dns.nix
+++ b/nixos/modules/services/cluster/kubernetes/dns.nix
@@ -3,8 +3,13 @@
 with lib;
 
 let
-  version = "1.14.10";
+  version = "1.2.5";
   cfg = config.services.kubernetes.addons.dns;
+  ports = {
+    dns = 10053;
+    health = 10054;
+    metrics = 10055;
+  };
 in {
   options.services.kubernetes.addons.dns = {
     enable = mkEnableOption "kubernetes dns addon";
@@ -27,49 +32,130 @@ in {
       type = types.str;
     };
 
-    kube-dns = mkOption {
-      description = "Docker image to seed for the kube-dns main container.";
-      type = types.attrs;
-      default = {
-        imageName = "k8s.gcr.io/k8s-dns-kube-dns-amd64";
-        imageDigest = "sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8";
-        finalImageTag = version;
-        sha256 = "0x583znk9smqn0fix7ld8sm5jgaxhqhx3fq97b1wkqm7iwhvl3pj";
-      };
-    };
-
-    dnsmasq-nanny = mkOption {
-      description = "Docker image to seed for the kube-dns dnsmasq container.";
-      type = types.attrs;
-      default = {
-        imageName = "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64";
-        imageDigest = "sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8";
-        finalImageTag = version;
-        sha256 = "1fihml7s2mfwgac51cbqpylkwbivc8nyhgi4vb820s83zvl8a6y1";
-      };
+    replicas = mkOption {
+      description = "Number of DNS pod replicas to deploy in the cluster.";
+      default = 2;
+      type = types.int;
     };
 
-    sidecar = mkOption {
-      description = "Docker image to seed for the kube-dns sidecar container.";
+    coredns = mkOption {
+      description = "Docker image to seed for the CoreDNS container.";
       type = types.attrs;
       default = {
-        imageName = "k8s.gcr.io/k8s-dns-sidecar-amd64";
-        imageDigest = "sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4";
+        imageName = "coredns/coredns";
+        imageDigest = "sha256:33c8da20b887ae12433ec5c40bfddefbbfa233d5ce11fb067122e68af30291d6";
         finalImageTag = version;
-        sha256 = "08l1bv5jgrhvjzpqpbinrkgvv52snc4fzyd8ya9v18ns2klyz7m0";
+        sha256 = "13q19rgwapv27xcs664dw502254yw4zw63insf6g2danidv2mg6i";
       };
     };
   };
 
   config = mkIf cfg.enable {
-    services.kubernetes.kubelet.seedDockerImages = with pkgs.dockerTools; [
-      (pullImage cfg.kube-dns)
-      (pullImage cfg.dnsmasq-nanny)
-      (pullImage cfg.sidecar)
-    ];
+    services.kubernetes.kubelet.seedDockerImages =
+      singleton (pkgs.dockerTools.pullImage cfg.coredns);
 
     services.kubernetes.addonManager.addons = {
-      kubedns-deployment = {
+      coredns-sa = {
+        apiVersion = "v1";
+        kind = "ServiceAccount";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+      };
+
+      coredns-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRole";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/bootstrapping" = "rbac-defaults";
+          };
+          name = "system:coredns";
+        };
+        rules = [
+          {
+            apiGroups = [ "" ];
+            resources = [ "endpoints" "services" "pods" "namespaces" ];
+            verbs = [ "list" "watch" ];
+          }
+          {
+            apiGroups = [ "" ];
+            resources = [ "nodes" ];
+            verbs = [ "get" ];
+          }
+        ];
+      };
+
+      coredns-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRoleBinding";
+        metadata = {
+          annotations = {
+            "rbac.authorization.kubernetes.io/autoupdate" = "true";
+          };
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/bootstrapping" = "rbac-defaults";
+          };
+          name = "system:coredns";
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "system:coredns";
+        };
+        subjects = [
+          {
+            kind = "ServiceAccount";
+            name = "coredns";
+            namespace = "kube-system";
+          }
+        ];
+      };
+
+      coredns-cm = {
+        apiVersion = "v1";
+        kind = "ConfigMap";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+        data = {
+          Corefile = ".:${toString ports.dns} {
+            errors
+            health :${toString ports.health}
+            kubernetes ${cfg.clusterDomain} in-addr.arpa ip6.arpa {
+              pods insecure
+              upstream
+              fallthrough in-addr.arpa ip6.arpa
+            }
+            prometheus :${toString ports.metrics}
+            proxy . /etc/resolv.conf
+            cache 30
+            loop
+            reload
+            loadbalance
+          }";
+        };
+      };
+
+      coredns-deploy = {
         apiVersion = "extensions/v1beta1";
         kind = "Deployment";
         metadata = {
@@ -77,183 +163,97 @@ in {
             "addonmanager.kubernetes.io/mode" = "Reconcile";
             "k8s-app" = "kube-dns";
             "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/name" = "CoreDNS";
           };
-          name = "kube-dns";
+          name = "coredns";
           namespace = "kube-system";
         };
         spec = {
-          selector.matchLabels."k8s-app" = "kube-dns";
+          replicas = cfg.replicas;
+          selector = {
+            matchLabels = { k8s-app = "kube-dns"; };
+          };
           strategy = {
-            rollingUpdate = {
-              maxSurge = "10%";
-              maxUnavailable = 0;
-            };
+            rollingUpdate = { maxUnavailable = 1; };
+            type = "RollingUpdate";
           };
           template = {
             metadata = {
-              annotations."scheduler.alpha.kubernetes.io/critical-pod" = "";
-              labels.k8s-app = "kube-dns";
+              labels = {
+                k8s-app = "kube-dns";
+              };
             };
             spec = {
-              priorityClassName = "system-cluster-critical";
               containers = [
                 {
-                  name = "kubedns";
-                  image = with cfg.kube-dns; "${imageName}:${finalImageTag}";
-                  resources = {
-                    limits.memory = "170Mi";
-                    requests = {
-                      cpu = "100m";
-                      memory = "70Mi";
-                    };
-                  };
+                  args = [ "-conf" "/etc/coredns/Corefile" ];
+                  image = with cfg.coredns; "${imageName}:${finalImageTag}";
+                  imagePullPolicy = "Never";
                   livenessProbe = {
                     failureThreshold = 5;
                     httpGet = {
-                      path = "/healthcheck/kubedns";
-                      port = 10054;
+                      path = "/health";
+                      port = ports.health;
                       scheme = "HTTP";
                     };
                     initialDelaySeconds = 60;
                     successThreshold = 1;
                     timeoutSeconds = 5;
                   };
-                  readinessProbe = {
-                    httpGet = {
-                      path = "/readiness";
-                      port = 8081;
-                      scheme = "HTTP";
-                    };
-                    initialDelaySeconds = 3;
-                    timeoutSeconds = 5;
-                  };
-                  args = [
-                    "--domain=${cfg.clusterDomain}"
-                    "--dns-port=10053"
-                    "--config-dir=/kube-dns-config"
-                    "--v=2"
-                  ];
-                  env = [
-                    {
-                      name = "PROMETHEUS_PORT";
-                      value = "10055";
-                    }
-                  ];
+                  name = "coredns";
                   ports = [
                     {
-                      containerPort = 10053;
-                      name = "dns-local";
+                      containerPort = ports.dns;
+                      name = "dns";
                       protocol = "UDP";
                     }
                     {
-                      containerPort = 10053;
-                      name = "dns-tcp-local";
+                      containerPort = ports.dns;
+                      name = "dns-tcp";
                       protocol = "TCP";
                     }
                     {
-                      containerPort = 10055;
+                      containerPort = ports.metrics;
                       name = "metrics";
                       protocol = "TCP";
                     }
                   ];
-                  volumeMounts = [
-                    {
-                      mountPath = "/kube-dns-config";
-                      name = "kube-dns-config";
-                    }
-                  ];
-                }
-                {
-                  name = "dnsmasq";
-                  image = with cfg.dnsmasq-nanny; "${imageName}:${finalImageTag}";
-                  livenessProbe = {
-                    httpGet = {
-                      path = "/healthcheck/dnsmasq";
-                      port = 10054;
-                      scheme = "HTTP";
-                    };
-                    initialDelaySeconds = 60;
-                    timeoutSeconds = 5;
-                    successThreshold = 1;
-                    failureThreshold = 5;
-                  };
-                  args = [
-                    "-v=2"
-                    "-logtostderr"
-                    "-configDir=/etc/k8s/dns/dnsmasq-nanny"
-                    "-restartDnsmasq=true"
-                    "--"
-                    "-k"
-                    "--cache-size=1000"
-                    "--log-facility=-"
-                    "--server=/${cfg.clusterDomain}/127.0.0.1#10053"
-                    "--server=/in-addr.arpa/127.0.0.1#10053"
-                    "--server=/ip6.arpa/127.0.0.1#10053"
-                  ];
-                  ports = [
-                    {
-                      containerPort = 53;
-                      name = "dns";
-                      protocol = "UDP";
-                    }
-                    {
-                      containerPort = 53;
-                      name = "dns-tcp";
-                      protocol = "TCP";
-                    }
-                  ];
                   resources = {
+                    limits = {
+                      memory = "170Mi";
+                    };
                     requests = {
-                      cpu = "150m";
-                      memory = "20Mi";
+                      cpu = "100m";
+                      memory = "70Mi";
                     };
                   };
-                  volumeMounts = [
-                    {
-                      mountPath = "/etc/k8s/dns/dnsmasq-nanny";
-                      name = "kube-dns-config";
-                    }
-                  ];
-                }
-                {
-                  name = "sidecar";
-                  image = with cfg.sidecar; "${imageName}:${finalImageTag}";
-                  livenessProbe = {
-                    httpGet = {
-                      path = "/metrics";
-                      port = 10054;
-                      scheme = "HTTP";
+                  securityContext = {
+                    allowPrivilegeEscalation = false;
+                    capabilities = {
+                      drop = [ "all" ];
                     };
-                    initialDelaySeconds = 60;
-                    timeoutSeconds = 5;
-                    successThreshold = 1;
-                    failureThreshold = 5;
+                    readOnlyRootFilesystem = true;
                   };
-                  args = [
-                    "--v=2"
-                    "--logtostderr"
-                    "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
-                    "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
-                  ];
-                  ports = [
+                  volumeMounts = [
                     {
-                      containerPort = 10054;
-                      name = "metrics";
-                      protocol = "TCP";
+                      mountPath = "/etc/coredns";
+                      name = "config-volume";
+                      readOnly = true;
                     }
                   ];
-                  resources = {
-                    requests = {
-                      cpu = "10m";
-                      memory = "20Mi";
-                    };
-                  };
                 }
               ];
               dnsPolicy = "Default";
-              serviceAccountName = "kube-dns";
+              nodeSelector = {
+                "beta.kubernetes.io/os" = "linux";
+              };
+              serviceAccountName = "coredns";
               tolerations = [
                 {
+                  effect = "NoSchedule";
+                  key = "node-role.kubernetes.io/master";
+                }
+                {
                   key = "CriticalAddonsOnly";
                   operator = "Exists";
                 }
@@ -261,10 +261,15 @@ in {
               volumes = [
                 {
                   configMap = {
-                    name = "kube-dns";
-                    optional = true;
+                    items = [
+                      {
+                        key = "Corefile";
+                        path = "Corefile";
+                      }
+                    ];
+                    name = "coredns";
                   };
-                  name = "kube-dns-config";
+                  name = "config-volume";
                 }
               ];
             };
@@ -272,51 +277,40 @@ in {
         };
       };
 
-      kubedns-svc = {
+      coredns-svc = {
         apiVersion = "v1";
         kind = "Service";
         metadata = {
+          annotations = {
+            "prometheus.io/port" = toString ports.metrics;
+            "prometheus.io/scrape" = "true";
+          };
           labels = {
             "addonmanager.kubernetes.io/mode" = "Reconcile";
             "k8s-app" = "kube-dns";
             "kubernetes.io/cluster-service" = "true";
-            "kubernetes.io/name" = "KubeDNS";
+            "kubernetes.io/name" = "CoreDNS";
           };
           name = "kube-dns";
-          namespace  = "kube-system";
+          namespace = "kube-system";
         };
         spec = {
           clusterIP = cfg.clusterIp;
           ports = [
-            {name = "dns"; port = 53; protocol = "UDP";}
-            {name = "dns-tcp"; port = 53; protocol = "TCP";}
+            {
+              name = "dns";
+              port = 53;
+              targetPort = ports.dns;
+              protocol = "UDP";
+            }
+            {
+              name = "dns-tcp";
+              port = 53;
+              targetPort = ports.dns;
+              protocol = "TCP";
+            }
           ];
-          selector.k8s-app = "kube-dns";
-        };
-      };
-
-      kubedns-sa = {
-        apiVersion = "v1";
-        kind = "ServiceAccount";
-        metadata = {
-          name = "kube-dns";
-          namespace = "kube-system";
-          labels = {
-            "kubernetes.io/cluster-service" = "true";
-            "addonmanager.kubernetes.io/mode" = "Reconcile";
-          };
-        };
-      };
-
-      kubedns-cm = {
-        apiVersion = "v1";
-        kind = "ConfigMap";
-        metadata = {
-          name = "kube-dns";
-          namespace = "kube-system";
-          labels = {
-            "addonmanager.kubernetes.io/mode" = "EnsureExists";
-          };
+          selector = { k8s-app = "kube-dns"; };
         };
       };
     };
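
The addon now deploys a single CoreDNS container instead of the kube-dns/dnsmasq-nanny/sidecar trio and gains a replicas knob; a minimal sketch of enabling it (clusterIp and clusterDomain keep their existing meaning):

  {
    services.kubernetes.addons.dns = {
      enable = true;
      replicas = 2;   # new option, default 2
      # The coredns option can override the seeded coredns/coredns:1.2.5 image if needed.
    };
  }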
diff --git a/nixos/modules/services/computing/slurm/slurm.nix b/nixos/modules/services/computing/slurm/slurm.nix
index 09174ed39f5e..cd481212db2d 100644
--- a/nixos/modules/services/computing/slurm/slurm.nix
+++ b/nixos/modules/services/computing/slurm/slurm.nix
@@ -6,13 +6,18 @@ let
 
   cfg = config.services.slurm;
   # configuration file can be generated by http://slurm.schedmd.com/configurator.html
+
+  defaultUser = "slurm";
+
   configFile = pkgs.writeTextDir "slurm.conf"
     ''
       ClusterName=${cfg.clusterName}
+      StateSaveLocation=${cfg.stateSaveLocation}
+      SlurmUser=${cfg.user}
       ${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
       ${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
-      ${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''}
-      ${optionalString (cfg.partitionName != null) ''partitionName=${cfg.partitionName}''}
+      ${toString (map (x: "NodeName=${x}\n") cfg.nodeName)}
+      ${toString (map (x: "PartitionName=${x}\n") cfg.partitionName)}
       PlugStackConfig=${plugStackConfig}
       ProctrackType=${cfg.procTrackType}
       ${cfg.extraConfig}
@@ -24,12 +29,19 @@ let
       ${cfg.extraPlugstackConfig}
     '';
 
-
   cgroupConfig = pkgs.writeTextDir "cgroup.conf"
    ''
      ${cfg.extraCgroupConfig}
    '';
 
+  slurmdbdConf = pkgs.writeTextDir "slurmdbd.conf"
+   ''
+     DbdHost=${cfg.dbdserver.dbdHost}
+     SlurmUser=${cfg.user}
+     StorageType=accounting_storage/mysql
+     ${cfg.dbdserver.extraConfig}
+   '';
+
   # slurm expects some additional config files to be
   # in the same directory as slurm.conf
   etcSlurm = pkgs.symlinkJoin {
@@ -43,6 +55,8 @@ in
 
   ###### interface
 
+  meta.maintainers = [ maintainers.markuskowa ];
+
   options = {
 
     services.slurm = {
@@ -60,6 +74,27 @@ in
         };
       };
 
+      dbdserver = {
+        enable = mkEnableOption "SlurmDBD service";
+
+        dbdHost = mkOption {
+          type = types.str;
+          default = config.networking.hostName;
+          description = ''
+            Hostname of the machine where <literal>slurmdbd</literal>
+            is running (i.e. name returned by <literal>hostname -s</literal>).
+          '';
+        };
+
+        extraConfig = mkOption {
+          type = types.lines;
+          default = "";
+          description = ''
+            Extra configuration for <literal>slurmdbd.conf</literal>
+          '';
+        };
+      };
+
       client = {
         enable = mkEnableOption "slurm client daemon";
       };
@@ -116,9 +151,9 @@ in
       };
 
       nodeName = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        example = "linux[1-32] CPUs=1 State=UNKNOWN";
+        type = types.listOf types.str;
+        default = [];
+        example = literalExample ''[ "linux[1-32] CPUs=1 State=UNKNOWN" ];'';
         description = ''
           Name that SLURM uses to refer to a node (or base partition for BlueGene
           systems). Typically this would be the string that "/bin/hostname -s"
@@ -127,9 +162,9 @@ in
       };
 
       partitionName = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        example = "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP";
+        type = types.listOf types.str;
+        default = [];
+        example = literalExample ''[ "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP" ];'';
         description = ''
           Name by which the partition may be referenced. Note that now you have
           to write the partition's parameters after the name.
@@ -150,7 +185,7 @@ in
       };
 
       procTrackType = mkOption {
-        type = types.string;
+        type = types.str;
         default = "proctrack/linuxproc";
         description = ''
           Plugin to be used for process tracking on a job step basis.
@@ -159,6 +194,25 @@ in
         '';
       };
 
+      stateSaveLocation = mkOption {
+        type = types.str;
+        default = "/var/spool/slurmctld";
+        description = ''
+          Directory into which the Slurm controller, slurmctld, saves its state.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = defaultUser;
+        description = ''
+          Set this option when you want to run the slurmctld daemon
+          as something other than the default slurm user "slurm".
+          Note that the UID of this user needs to be the same
+          on all nodes.
+        '';
+      };
+
       extraConfig = mkOption {
         default = "";
         type = types.lines;
@@ -184,6 +238,8 @@ in
           used when <literal>procTrackType=proctrack/cgroup</literal>.
         '';
       };
+
+
     };
 
   };
@@ -220,12 +276,24 @@ in
         '';
       };
 
-  in mkIf (cfg.enableStools || cfg.client.enable || cfg.server.enable) {
+  in mkIf ( cfg.enableStools ||
+            cfg.client.enable ||
+            cfg.server.enable ||
+            cfg.dbdserver.enable ) {
 
     environment.systemPackages = [ wrappedSlurm ];
 
     services.munge.enable = mkDefault true;
 
+    # use a static uid as default to ensure it is the same on all nodes
+    users.users.slurm = mkIf (cfg.user == defaultUser) {
+      name = defaultUser;
+      group = "slurm";
+      uid = config.ids.uids.slurm;
+    };
+
+    users.groups.slurm.gid = config.ids.uids.slurm;
+
     systemd.services.slurmd = mkIf (cfg.client.enable) {
       path = with pkgs; [ wrappedSlurm coreutils ]
         ++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
@@ -261,6 +329,29 @@ in
         PIDFile = "/run/slurmctld.pid";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
       };
+
+      preStart = ''
+        mkdir -p ${cfg.stateSaveLocation}
+        chown -R ${cfg.user}:slurm ${cfg.stateSaveLocation}
+      '';
+    };
+
+    systemd.services.slurmdbd = mkIf (cfg.dbdserver.enable) {
+      path = with pkgs; [ wrappedSlurm munge coreutils ];
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "munged.service" "mysql.service" ];
+      requires = [ "munged.service" "mysql.service" ];
+
+      # slurm strips the last component off the path
+      environment.SLURM_CONF = "${slurmdbdConf}/slurm.conf";
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${cfg.package}/bin/slurmdbd";
+        PIDFile = "/run/slurmdbd.pid";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      };
     };
 
   };
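
nodeName and partitionName are now lists of strings, and slurmdbd can be enabled as its own service; a sketch combining the option examples above (the cluster name is illustrative):

  {
    services.slurm = {
      server.enable = true;
      clusterName = "mycluster";
      nodeName = [ "linux[1-32] CPUs=1 State=UNKNOWN" ];
      partitionName = [ "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP" ];
      dbdserver.enable = true;   # runs slurmdbd against the local MySQL service
    };
  }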
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index de2a757196a5..f592be0e768b 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -55,7 +55,7 @@ in
 
       package = mkOption {
         type = types.package;
-        example = literalExample "pkgs.postgresql96";
+        example = literalExample "pkgs.postgresql_9_6";
         description = ''
           PostgreSQL package to use.
         '';
@@ -118,7 +118,7 @@ in
       extraPlugins = mkOption {
         type = types.listOf types.path;
         default = [];
-        example = literalExample "[ (pkgs.postgis.override { postgresql = pkgs.postgresql94; }) ]";
+        example = literalExample "[ (pkgs.postgis.override { postgresql = pkgs.postgresql_9_4; }) ]";
         description = ''
           When this list contains elements a new store path is created.
           PostgreSQL and the elements are symlinked into it. Then pg_config,
@@ -167,9 +167,9 @@ in
       # Note: when changing the default, make it conditional on
       # ‘system.stateVersion’ to maintain compatibility with existing
       # systems!
-      mkDefault (if versionAtLeast config.system.stateVersion "17.09" then pkgs.postgresql96
-            else if versionAtLeast config.system.stateVersion "16.03" then pkgs.postgresql95
-            else pkgs.postgresql94);
+      mkDefault (if versionAtLeast config.system.stateVersion "17.09" then pkgs.postgresql_9_6
+            else if versionAtLeast config.system.stateVersion "16.03" then pkgs.postgresql_9_5
+            else pkgs.postgresql_9_4);
 
     services.postgresql.dataDir =
       mkDefault (if versionAtLeast config.system.stateVersion "17.09" then "/var/lib/postgresql/${config.services.postgresql.package.psqlSchema}"
@@ -271,5 +271,5 @@ in
   };
 
   meta.doc = ./postgresql.xml;
-
+  meta.maintainers = with lib.maintainers; [ thoughtpolice ];
 }
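
The PostgreSQL package attributes follow the new naming scheme (postgresql_9_6 instead of postgresql96); a minimal sketch of pinning the server and a plugin accordingly:

  { pkgs, ... }:
  {
    services.postgresql.enable = true;
    services.postgresql.package = pkgs.postgresql_9_6;
    services.postgresql.extraPlugins = [ (pkgs.postgis.override { postgresql = pkgs.postgresql_9_6; }) ];
  }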
diff --git a/nixos/modules/services/databases/postgresql.xml b/nixos/modules/services/databases/postgresql.xml
index f89f0d653164..14f4d4909bc0 100644
--- a/nixos/modules/services/databases/postgresql.xml
+++ b/nixos/modules/services/databases/postgresql.xml
@@ -27,12 +27,12 @@
    <filename>configuration.nix</filename>:
 <programlisting>
 <xref linkend="opt-services.postgresql.enable"/> = true;
-<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql94;
+<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_9_4;
 </programlisting>
    Note that you are required to specify the desired version of PostgreSQL
-   (e.g. <literal>pkgs.postgresql94</literal>). Since upgrading your PostgreSQL
-   version requires a database dump and reload (see below), NixOS cannot
-   provide a default value for
+   (e.g. <literal>pkgs.postgresql_9_4</literal>). Since upgrading your
+   PostgreSQL version requires a database dump and reload (see below), NixOS
+   cannot provide a default value for
    <xref linkend="opt-services.postgresql.package"/> such as the most recent
    release of PostgreSQL.
   </para>
diff --git a/nixos/modules/services/development/jupyter/default.nix b/nixos/modules/services/development/jupyter/default.nix
index 9fcc00431865..f20860af6e12 100644
--- a/nixos/modules/services/development/jupyter/default.nix
+++ b/nixos/modules/services/development/jupyter/default.nix
@@ -145,6 +145,7 @@ in {
       systemd.services.jupyter = {
         description = "Jupyter development server";
 
+        after = [ "network.target" ];
         wantedBy = [ "multi-user.target" ];
 
         # TODO: Patch notebook so we can explicitly pass in a shell
diff --git a/nixos/modules/services/hardware/upower.nix b/nixos/modules/services/hardware/upower.nix
index 2198842a4511..1da47349c077 100644
--- a/nixos/modules/services/hardware/upower.nix
+++ b/nixos/modules/services/hardware/upower.nix
@@ -56,6 +56,32 @@ in
           { Type = "dbus";
             BusName = "org.freedesktop.UPower";
             ExecStart = "@${cfg.package}/libexec/upowerd upowerd";
+            Restart = "on-failure";
+            # Upstream lockdown:
+            # Filesystem lockdown
+            ProtectSystem = "strict";
+            # Needed by keyboard backlight support
+            ProtectKernelTunables = false;
+            ProtectControlGroups = true;
+            ReadWritePaths = "/var/lib/upower";
+            ProtectHome = true;
+            PrivateTmp = true;
+
+            # Network
+            # PrivateNetwork=true would block udev's netlink socket
+            RestrictAddressFamilies = "AF_UNIX AF_NETLINK";
+
+            # Execute Mappings
+            MemoryDenyWriteExecute = true;
+
+            # Modules
+            ProtectKernelModules = true;
+
+            # Real-time
+            RestrictRealtime = true;
+
+            # Privilege escalation
+            NoNewPrivileges = true;
           };
       };
 
diff --git a/nixos/modules/services/mail/clamsmtp.nix b/nixos/modules/services/mail/clamsmtp.nix
index 8f4f39aa7288..fc1267c5d280 100644
--- a/nixos/modules/services/mail/clamsmtp.nix
+++ b/nixos/modules/services/mail/clamsmtp.nix
@@ -176,4 +176,6 @@ in
         }
       ) cfg.instances);
     };
+
+  meta.maintainers = with lib.maintainers; [ ekleog ];
 }
diff --git a/nixos/modules/services/mail/dkimproxy-out.nix b/nixos/modules/services/mail/dkimproxy-out.nix
index 894b88e25c1b..f4ac9e47007a 100644
--- a/nixos/modules/services/mail/dkimproxy-out.nix
+++ b/nixos/modules/services/mail/dkimproxy-out.nix
@@ -115,4 +115,6 @@ in
         };
       };
     };
+
+  meta.maintainers = with lib.maintainers; [ ekleog ];
 }
diff --git a/nixos/modules/services/mail/rspamd.nix b/nixos/modules/services/mail/rspamd.nix
index ff01a5dee53d..78a2f37b6ded 100644
--- a/nixos/modules/services/mail/rspamd.nix
+++ b/nixos/modules/services/mail/rspamd.nix
@@ -127,11 +127,15 @@ let
       options {
         pidfile = "$RUNDIR/rspamd.pid";
         .include "$CONFDIR/options.inc"
+        .include(try=true; priority=1,duplicate=merge) "$LOCAL_CONFDIR/local.d/options.inc"
+        .include(try=true; priority=10) "$LOCAL_CONFDIR/override.d/options.inc"
       }
 
       logging {
         type = "syslog";
         .include "$CONFDIR/logging.inc"
+        .include(try=true; priority=1,duplicate=merge) "$LOCAL_CONFDIR/local.d/logging.inc"
+        .include(try=true; priority=10) "$LOCAL_CONFDIR/override.d/logging.inc"
       }
 
       ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
@@ -149,6 +153,42 @@ let
       ${cfg.extraConfig}
    '';
 
+  filterFiles = files: filterAttrs (n: v: v.enable) files;
+  rspamdDir = pkgs.linkFarm "etc-rspamd-dir" (
+    (mapAttrsToList (name: file: { name = "local.d/${name}"; path = file.source; }) (filterFiles cfg.locals)) ++
+    (mapAttrsToList (name: file: { name = "override.d/${name}"; path = file.source; }) (filterFiles cfg.overrides)) ++
+    (optional (cfg.localLuaRules != null) { name = "rspamd.local.lua"; path = cfg.localLuaRules; }) ++
+    [ { name = "rspamd.conf"; path = rspamdConfFile; } ]
+  );
+
+  configFileModule = prefix: { name, config, ... }: {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Whether this file ${prefix} should be generated.  This
+          option allows specific ${prefix} files to be disabled.
+        '';
+      };
+
+      text = mkOption {
+        default = null;
+        type = types.nullOr types.lines;
+        description = "Text of the file.";
+      };
+
+      source = mkOption {
+        type = types.path;
+        description = "Path of the source file.";
+      };
+    };
+    config = {
+      source = mkIf (config.text != null) (
+        let name' = "rspamd-${prefix}-" + baseNameOf name;
+        in mkDefault (pkgs.writeText name' config.text));
+    };
+  };
 in
 
 {
@@ -167,6 +207,41 @@ in
         description = "Whether to run the rspamd daemon in debug mode.";
       };
 
+      locals = mkOption {
+        type = with types; attrsOf (submodule (configFileModule "locals"));
+        default = {};
+        description = ''
+          Local configuration files, written into <filename>/etc/rspamd/local.d/{name}</filename>.
+        '';
+        example = literalExample ''
+          { "redis.conf".source = "/nix/store/.../etc/dir/redis.conf";
+            "arc.conf".text = "allow_envfrom_empty = true;";
+          }
+        '';
+      };
+
+      overrides = mkOption {
+        type = with types; attrsOf (submodule (configFileModule "overrides"));
+        default = {};
+        description = ''
+          Overridden configuration files, written into <filename>/etc/rspamd/override.d/{name}</filename>.
+        '';
+        example = literalExample ''
+          { "redis.conf".source = "/nix/store/.../etc/dir/redis.conf";
+            "arc.conf".text = "allow_envfrom_empty = true;";
+          }
+        '';
+      };
+
+      localLuaRules = mkOption {
+        default = null;
+        type = types.nullOr types.path;
+        description = ''
+          Path of file to link to <filename>/etc/rspamd/rspamd.local.lua</filename> for local
+          rules written in Lua
+        '';
+      };
+
       workers = mkOption {
         type = with types; attrsOf (submodule workerOpts);
         description = ''
@@ -242,16 +317,17 @@ in
       gid = config.ids.gids.rspamd;
     };
 
-    environment.etc."rspamd.conf".source = rspamdConfFile;
+    environment.etc."rspamd".source = rspamdDir;
 
     systemd.services.rspamd = {
       description = "Rspamd Service";
 
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
+      restartTriggers = [ rspamdDir ];
 
       serviceConfig = {
-        ExecStart = "${pkgs.rspamd}/bin/rspamd ${optionalString cfg.debug "-d"} --user=${cfg.user} --group=${cfg.group} --pid=/run/rspamd.pid -c ${rspamdConfFile} -f";
+        ExecStart = "${pkgs.rspamd}/bin/rspamd ${optionalString cfg.debug "-d"} --user=${cfg.user} --group=${cfg.group} --pid=/run/rspamd.pid -c /etc/rspamd/rspamd.conf -f";
         Restart = "always";
         RuntimeDirectory = "rspamd";
         PrivateTmp = true;
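The new locals/overrides options map attribute names to files under /etc/rspamd/local.d and /etc/rspamd/override.d. A minimal usage sketch (services.rspamd.enable is the module's existing switch; the rspamd configuration keys shown are only illustrative):

  services.rspamd = {
    enable = true;
    locals."milter_headers.conf".text = ''
      extended_spam_headers = yes;
    '';
    overrides."actions.conf".text = ''
      reject = 15;
    '';
    localLuaRules = ./rspamd.local.lua;  # linked to /etc/rspamd/rspamd.local.lua
  };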
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index 8ea831afb7c1..aa72cda70453 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -14,15 +14,16 @@ let
   pathUrlQuote = url: replaceStrings ["/"] ["%2F"] url;
   pgSuperUser = config.services.postgresql.superUser;
 
-  databaseYml = ''
-    production:
-      adapter: postgresql
-      database: ${cfg.databaseName}
-      host: ${cfg.databaseHost}
-      password: ${cfg.databasePassword}
-      username: ${cfg.databaseUsername}
-      encoding: utf8
-  '';
+  databaseConfig = {
+    production = {
+      adapter = "postgresql";
+      database = cfg.databaseName;
+      host = cfg.databaseHost;
+      password = cfg.databasePassword;
+      username = cfg.databaseUsername;
+      encoding = "utf8";
+    };
+  };
 
   gitalyToml = pkgs.writeText "gitaly.toml" ''
     socket_path = "${lib.escape ["\""] gitalySocket}"
@@ -45,35 +46,31 @@ let
     '') gitlabConfig.production.repositories.storages))}
   '';
 
-  gitlabShellYml = ''
-    user: ${cfg.user}
-    gitlab_url: "http+unix://${pathUrlQuote gitlabSocket}"
-    http_settings:
-      self_signed_cert: false
-    repos_path: "${cfg.statePath}/repositories"
-    secret_file: "${cfg.statePath}/config/gitlab_shell_secret"
-    log_file: "${cfg.statePath}/log/gitlab-shell.log"
-    custom_hooks_dir: "${cfg.statePath}/custom_hooks"
-    redis:
-      bin: ${pkgs.redis}/bin/redis-cli
-      host: 127.0.0.1
-      port: 6379
-      database: 0
-      namespace: resque:gitlab
-  '';
+  gitlabShellConfig = {
+    user = cfg.user;
+    gitlab_url = "http+unix://${pathUrlQuote gitlabSocket}";
+    http_settings.self_signed_cert = false;
+    repos_path = "${cfg.statePath}/repositories";
+    secret_file = "${cfg.statePath}/config/gitlab_shell_secret";
+    log_file = "${cfg.statePath}/log/gitlab-shell.log";
+    custom_hooks_dir = "${cfg.statePath}/custom_hooks";
+    redis = {
+      bin = "${pkgs.redis}/bin/redis-cli";
+      host = "127.0.0.1";
+      port = 6379;
+      database = 0;
+      namespace = "resque:gitlab";
+    };
+  };
 
-  redisYml = ''
-    production:
-      url: redis://localhost:6379/
-  '';
+  redisConfig.production.url = "redis://localhost:6379/";
 
-  secretsYml = ''
-    production:
-      secret_key_base: ${cfg.secrets.secret}
-      otp_key_base: ${cfg.secrets.otp}
-      db_key_base: ${cfg.secrets.db}
-      openid_connect_signing_key: ${builtins.toJSON cfg.secrets.jws}
-  '';
+  secretsConfig.production = {
+    secret_key_base = cfg.secrets.secret;
+    otp_key_base = cfg.secrets.otp;
+    db_key_base = cfg.secrets.db;
+    openid_connect_signing_key = cfg.secrets.jws;
+  };
 
   gitlabConfig = {
     # These are the default settings from config/gitlab.example.yml
@@ -115,12 +112,8 @@ let
         upload_pack = true;
         receive_pack = true;
       };
-      workhorse = {
-        secret_file = "${cfg.statePath}/.gitlab_workhorse_secret";
-      };
-      git = {
-        bin_path = "git";
-      };
+      workhorse.secret_file = "${cfg.statePath}/.gitlab_workhorse_secret";
+      git.bin_path = "git";
       monitoring = {
         ip_whitelist = [ "127.0.0.0/8" "::1/128" ];
         sidekiq_exporter = {
@@ -138,7 +131,7 @@ let
     HOME = "${cfg.statePath}/home";
     UNICORN_PATH = "${cfg.statePath}/";
     GITLAB_PATH = "${cfg.packages.gitlab}/share/gitlab/";
-    GITLAB_STATE_PATH = "${cfg.statePath}";
+    GITLAB_STATE_PATH = cfg.statePath;
     GITLAB_UPLOADS_PATH = "${cfg.statePath}/uploads";
     SCHEMA = "${cfg.statePath}/db/schema.rb";
     GITLAB_LOG_PATH = "${cfg.statePath}/log";
@@ -146,13 +139,11 @@ let
     GITLAB_SHELL_CONFIG_PATH = "${cfg.statePath}/shell/config.yml";
     GITLAB_SHELL_SECRET_PATH = "${cfg.statePath}/config/gitlab_shell_secret";
     GITLAB_SHELL_HOOKS_PATH = "${cfg.statePath}/shell/hooks";
-    GITLAB_REDIS_CONFIG_FILE = pkgs.writeText "gitlab-redis.yml" redisYml;
+    GITLAB_REDIS_CONFIG_FILE = pkgs.writeText "redis.yml" (builtins.toJSON redisConfig);
     prometheus_multiproc_dir = "/run/gitlab";
     RAILS_ENV = "production";
   };
 
-  unicornConfig = builtins.readFile ./defaultUnicornConfig.rb;
-
   gitlab-rake = pkgs.stdenv.mkDerivation rec {
     name = "gitlab-rake";
     buildInputs = [ pkgs.makeWrapper ];
@@ -162,7 +153,6 @@ let
       mkdir -p $out/bin
       makeWrapper ${cfg.packages.gitlab.rubyEnv}/bin/rake $out/bin/gitlab-rake \
           ${concatStrings (mapAttrsToList (name: value: "--set ${name} '${value}' ") gitlabEnv)} \
-          --set GITLAB_CONFIG_PATH '${cfg.statePath}/config' \
           --set PATH '${lib.makeBinPath [ pkgs.nodejs pkgs.gzip pkgs.git pkgs.gnutar config.services.postgresql.package pkgs.coreutils pkgs.procps ]}:$PATH' \
           --set RAKEOPT '-f ${cfg.packages.gitlab}/share/gitlab/Rakefile' \
           --run 'cd ${cfg.packages.gitlab}/share/gitlab'
@@ -306,7 +296,6 @@ in {
 
       initialRootPassword = mkOption {
         type = types.str;
-        default = "UseNixOS!";
         description = ''
           Initial password of the root account if this is a new install.
         '';
@@ -461,10 +450,30 @@ in {
       }
     ];
 
+    systemd.tmpfiles.rules = [
+      "d /run/gitlab 0755 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabEnv.HOME} 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.backupPath} 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/builds 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/config 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/db 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/log 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/repositories 2770 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/shell 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/tmp/pids 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/tmp/sockets 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/uploads 0700 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/custom_hooks/pre-receive.d 0700 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/custom_hooks/post-receive.d 0700 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/custom_hooks/update.d 0700 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabConfig.production.shared.path}/artifacts 0750 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabConfig.production.shared.path}/lfs-objects 0750 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabConfig.production.shared.path}/pages 0750 ${cfg.user} ${cfg.group} -"
+    ];
+
     systemd.services.gitlab-sidekiq = {
-      after = [ "network.target" "redis.service" ];
+      after = [ "network.target" "redis.service" "gitlab.service" ];
       wantedBy = [ "multi-user.target" ];
-      partOf = [ "gitlab.service" ];
       environment = gitlabEnv;
       path = with pkgs; [
         config.services.postgresql.package
@@ -486,10 +495,8 @@ in {
     };
 
     systemd.services.gitaly = {
-      after = [ "network.target" "gitlab.service" ];
+      after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      environment.HOME = gitlabEnv.HOME;
-      environment.GITLAB_SHELL_CONFIG_PATH = gitlabEnv.GITLAB_SHELL_CONFIG_PATH;
       path = with pkgs; [ gitAndTools.git cfg.packages.gitaly.rubyEnv cfg.packages.gitaly.rubyEnv.wrappedRuby ];
       serviceConfig = {
         Type = "simple";
@@ -505,8 +512,6 @@ in {
     systemd.services.gitlab-workhorse = {
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      environment.HOME = gitlabEnv.HOME;
-      environment.GITLAB_SHELL_CONFIG_PATH = gitlabEnv.GITLAB_SHELL_CONFIG_PATH;
       path = with pkgs; [
         gitAndTools.git
         gnutar
@@ -514,10 +519,6 @@ in {
         openssh
         gitlab-workhorse
       ];
-      preStart = ''
-        mkdir -p /run/gitlab
-        chown ${cfg.user}:${cfg.group} /run/gitlab
-      '';
       serviceConfig = {
         PermissionsStartOnly = true; # preStart must be run as root
         Type = "simple";
@@ -538,7 +539,7 @@ in {
     };
 
     systemd.services.gitlab = {
-      after = [ "network.target" "postgresql.service" "redis.service" ];
+      after = [ "gitlab-workhorse.service" "gitaly.service" "network.target" "postgresql.service" "redis.service" ];
       requires = [ "gitlab-sidekiq.service" ];
       wantedBy = [ "multi-user.target" ];
       environment = gitlabEnv;
@@ -551,102 +552,76 @@ in {
         gnupg
       ];
       preStart = ''
-        mkdir -p ${cfg.backupPath}
-        mkdir -p ${cfg.statePath}/builds
-        mkdir -p ${cfg.statePath}/repositories
-        mkdir -p ${gitlabConfig.production.shared.path}/artifacts
-        mkdir -p ${gitlabConfig.production.shared.path}/lfs-objects
-        mkdir -p ${gitlabConfig.production.shared.path}/pages
-        mkdir -p ${cfg.statePath}/log
-        mkdir -p ${cfg.statePath}/tmp/pids
-        mkdir -p ${cfg.statePath}/tmp/sockets
-        mkdir -p ${cfg.statePath}/shell
-        mkdir -p ${cfg.statePath}/db
-        mkdir -p ${cfg.statePath}/uploads
-        mkdir -p ${cfg.statePath}/custom_hooks/pre-receive.d
-        mkdir -p ${cfg.statePath}/custom_hooks/post-receive.d
-        mkdir -p ${cfg.statePath}/custom_hooks/update.d
-
-        rm -rf ${cfg.statePath}/config ${cfg.statePath}/shell/hooks
-        mkdir -p ${cfg.statePath}/config
-
-        ${pkgs.openssl}/bin/openssl rand -hex 32 > ${cfg.statePath}/config/gitlab_shell_secret
-
-        mkdir -p /run/gitlab
-        mkdir -p ${cfg.statePath}/log
-        [ -d /run/gitlab/log ] || ln -sf ${cfg.statePath}/log /run/gitlab/log
-        [ -d /run/gitlab/tmp ] || ln -sf ${cfg.statePath}/tmp /run/gitlab/tmp
-        [ -d /run/gitlab/uploads ] || ln -sf ${cfg.statePath}/uploads /run/gitlab/uploads
-        ln -sf $GITLAB_SHELL_CONFIG_PATH /run/gitlab/shell-config.yml
-        chown -R ${cfg.user}:${cfg.group} /run/gitlab
-
-        # Prepare home directory
-        mkdir -p ${gitlabEnv.HOME}/.ssh
-        touch ${gitlabEnv.HOME}/.ssh/authorized_keys
-        chown -R ${cfg.user}:${cfg.group} ${gitlabEnv.HOME}/
-
         cp -rf ${cfg.packages.gitlab}/share/gitlab/db/* ${cfg.statePath}/db
-        cp -rf ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
-        ${optionalString cfg.smtp.enable ''
-          ln -sf ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
-        ''}
-        ln -sf ${cfg.statePath}/config /run/gitlab/config
+        rm -rf ${cfg.statePath}/config
+        mkdir ${cfg.statePath}/config
         if [ -e ${cfg.statePath}/lib ]; then
           rm ${cfg.statePath}/lib
         fi
-        ln -sf ${pkgs.gitlab}/share/gitlab/lib ${cfg.statePath}/lib
+
+        ln -sf ${cfg.packages.gitlab}/share/gitlab/lib ${cfg.statePath}/lib
+        [ -L /run/gitlab/config ] || ln -sf ${cfg.statePath}/config /run/gitlab/config
+        [ -L /run/gitlab/log ] || ln -sf ${cfg.statePath}/log /run/gitlab/log
+        [ -L /run/gitlab/tmp ] || ln -sf ${cfg.statePath}/tmp /run/gitlab/tmp
+        [ -L /run/gitlab/uploads ] || ln -sf ${cfg.statePath}/uploads /run/gitlab/uploads
+        ${optionalString cfg.smtp.enable ''
+          ln -sf ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
+        ''}
         cp ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
+        cp -rf ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
+        ${pkgs.openssl}/bin/openssl rand -hex 32 > ${cfg.statePath}/config/gitlab_shell_secret
 
         # JSON is a subset of YAML
-        ln -fs ${pkgs.writeText "gitlab.yml" (builtins.toJSON gitlabConfig)} ${cfg.statePath}/config/gitlab.yml
-        ln -fs ${pkgs.writeText "database.yml" databaseYml} ${cfg.statePath}/config/database.yml
-        ln -fs ${pkgs.writeText "secrets.yml" secretsYml} ${cfg.statePath}/config/secrets.yml
-        ln -fs ${pkgs.writeText "unicorn.rb" unicornConfig} ${cfg.statePath}/config/unicorn.rb
-
-        chown -R ${cfg.user}:${cfg.group} ${cfg.statePath}/
-        chmod -R ug+rwX,o-rwx+X ${cfg.statePath}/
+        ln -sf ${pkgs.writeText "gitlab.yml" (builtins.toJSON gitlabConfig)} ${cfg.statePath}/config/gitlab.yml
+        ln -sf ${pkgs.writeText "database.yml" (builtins.toJSON databaseConfig)} ${cfg.statePath}/config/database.yml
+        ln -sf ${pkgs.writeText "secrets.yml" (builtins.toJSON secretsConfig)} ${cfg.statePath}/config/secrets.yml
+        ln -sf ${./defaultUnicornConfig.rb} ${cfg.statePath}/config/unicorn.rb
 
         # Install the shell required to push repositories
-        ln -fs ${pkgs.writeText "config.yml" gitlabShellYml} "$GITLAB_SHELL_CONFIG_PATH"
-        ln -fs ${cfg.packages.gitlab-shell}/hooks "$GITLAB_SHELL_HOOKS_PATH"
+        ln -sf ${pkgs.writeText "config.yml" (builtins.toJSON gitlabShellConfig)} /run/gitlab/shell-config.yml
+        [ -L ${cfg.statePath}/shell/hooks ] ||  ln -sf ${cfg.packages.gitlab-shell}/hooks ${cfg.statePath}/shell/hooks
         ${cfg.packages.gitlab-shell}/bin/install
 
-        if [ "${cfg.databaseHost}" = "127.0.0.1" ]; then
-          if ! test -e "${cfg.statePath}/db-created"; then
+        chown -R ${cfg.user}:${cfg.group} ${cfg.statePath}/
+        chmod -R ug+rwX,o-rwx+X ${cfg.statePath}/
+        chown -R ${cfg.user}:${cfg.group} /run/gitlab
+
+        if ! test -e "${cfg.statePath}/db-created"; then
+          if [ "${cfg.databaseHost}" = "127.0.0.1" ]; then
             ${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql postgres -c "CREATE ROLE ${cfg.databaseUsername} WITH LOGIN NOCREATEDB NOCREATEROLE ENCRYPTED PASSWORD '${cfg.databasePassword}'"
             ${pkgs.sudo}/bin/sudo -u ${pgSuperUser} ${config.services.postgresql.package}/bin/createdb --owner ${cfg.databaseUsername} ${cfg.databaseName}
-            touch "${cfg.statePath}/db-created"
+
+            # enable required pg_trgm extension for gitlab
+            ${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql ${cfg.databaseName} -c "CREATE EXTENSION IF NOT EXISTS pg_trgm"
           fi
 
-          # enable required pg_trgm extension for gitlab
-          ${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql ${cfg.databaseName} -c "CREATE EXTENSION IF NOT EXISTS pg_trgm"
+          ${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${gitlab-rake}/bin/gitlab-rake db:schema:load
+
+          touch "${cfg.statePath}/db-created"
         fi
 
         # Always do the db migrations just to be sure the database is up-to-date
-        ${gitlab-rake}/bin/gitlab-rake db:migrate RAILS_ENV=production
+        ${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${gitlab-rake}/bin/gitlab-rake db:migrate
 
-        # The gitlab:setup task is horribly broken somehow, the db:migrate
-        # task above and the db:seed_fu below will do the same for setting
-        # up the initial database
         if ! test -e "${cfg.statePath}/db-seeded"; then
-          ${gitlab-rake}/bin/gitlab-rake db:seed_fu RAILS_ENV=production \
+          ${pkgs.sudo}/bin/sudo -u ${cfg.user} ${gitlab-rake}/bin/gitlab-rake db:seed_fu \
             GITLAB_ROOT_PASSWORD='${cfg.initialRootPassword}' GITLAB_ROOT_EMAIL='${cfg.initialRootEmail}'
           touch "${cfg.statePath}/db-seeded"
         fi
 
         # The gitlab:shell:setup regenerates the authorized_keys file so that
         # the store path to the gitlab-shell in it gets updated
-        ${pkgs.sudo}/bin/sudo -u ${cfg.user} force=yes ${gitlab-rake}/bin/gitlab-rake gitlab:shell:setup RAILS_ENV=production
+        ${pkgs.sudo}/bin/sudo -u ${cfg.user} -H force=yes ${gitlab-rake}/bin/gitlab-rake gitlab:shell:setup
 
         # The gitlab:shell:create_hooks task seems broken for fixing links
         # so we instead delete all the hooks and create them anew
         rm -f ${cfg.statePath}/repositories/**/*.git/hooks
-        ${gitlab-rake}/bin/gitlab-rake gitlab:shell:create_hooks RAILS_ENV=production
+        ${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${gitlab-rake}/bin/gitlab-rake gitlab:shell:create_hooks
+
+        ${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${pkgs.git}/bin/git config --global core.autocrlf "input"
 
         # Change permissions in the last step because some of the
         # intermediary scripts like to create directories as root.
-        chown -R ${cfg.user}:${cfg.group} ${cfg.statePath}
-        chmod -R ug+rwX,o-rwx+X ${cfg.statePath}
         chmod -R u+rwX,go-rwx+X ${gitlabEnv.HOME}
         chmod -R ug+rwX,o-rwx ${cfg.statePath}/repositories
         chmod -R ug-s ${cfg.statePath}/repositories
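Since the "UseNixOS!" default for initialRootPassword is dropped here, a password must now be set explicitly. A hedged sketch (enable is the module's existing switch; the values are placeholders to replace):

  services.gitlab = {
    enable = true;
    initialRootEmail = "admin@example.org";
    initialRootPassword = "pleaseChangeMe";
  };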
diff --git a/nixos/modules/services/misc/home-assistant.nix b/nixos/modules/services/misc/home-assistant.nix
index 0756e81612ac..2e9aa33aeeee 100644
--- a/nixos/modules/services/misc/home-assistant.nix
+++ b/nixos/modules/services/misc/home-assistant.nix
@@ -157,6 +157,7 @@ in {
         Restart = "on-failure";
         ProtectSystem = "strict";
         ReadWritePaths = "${cfg.configDir}";
+        KillSignal = "SIGINT";
         PrivateTmp = true;
         RemoveIPC = true;
       };
diff --git a/nixos/modules/services/misc/packagekit.nix b/nixos/modules/services/misc/packagekit.nix
index 2d1ff7bb4117..bce21e8acff3 100644
--- a/nixos/modules/services/misc/packagekit.nix
+++ b/nixos/modules/services/misc/packagekit.nix
@@ -6,11 +6,8 @@ let
 
   cfg = config.services.packagekit;
 
-  backend = "nix";
-
   packagekitConf = ''
 [Daemon]
-DefaultBackend=${backend}
 KeepCache=false
     '';
 
diff --git a/nixos/modules/services/monitoring/kapacitor.nix b/nixos/modules/services/monitoring/kapacitor.nix
new file mode 100644
index 000000000000..1de0a8d5af2f
--- /dev/null
+++ b/nixos/modules/services/monitoring/kapacitor.nix
@@ -0,0 +1,154 @@
+{ options, config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.kapacitor;
+
+  kapacitorConf = pkgs.writeTextFile {
+    name = "kapacitord.conf";
+    text = ''
+      hostname="${config.networking.hostName}"
+      data_dir="${cfg.dataDir}"
+
+      [http]
+        bind-address = "${cfg.bind}:${toString cfg.port}"
+        log-enabled = false
+        auth-enabled = false
+
+      [task]
+        dir = "${cfg.dataDir}/tasks"
+        snapshot-interval = "${cfg.taskSnapshotInterval}"
+
+      [replay]
+        dir = "${cfg.dataDir}/replay"
+
+      [storage]
+        boltdb = "${cfg.dataDir}/kapacitor.db"
+
+      ${optionalString (cfg.loadDirectory != null) ''
+        [load]
+          enabled = true
+          dir = "${cfg.loadDirectory}"
+      ''}
+
+      ${optionalString (cfg.defaultDatabase.enable) ''
+        [[influxdb]]
+          name = "default"
+          enabled = true
+          default = true
+          urls = [ "${cfg.defaultDatabase.url}" ]
+          username = "${cfg.defaultDatabase.username}"
+          password = "${cfg.defaultDatabase.password}"
+      ''}
+
+      ${cfg.extraConfig}
+    '';
+  };
+in
+{
+  options.services.kapacitor = {
+    enable = mkEnableOption "kapacitor";
+
+    dataDir = mkOption {
+      type = types.path;
+      example = "/var/lib/kapacitor";
+      default = "/var/lib/kapacitor";
+      description = "Location where Kapacitor stores its state";
+    };
+
+    port = mkOption {
+      type = types.int;
+      default = 9092;
+      description = "Port of Kapacitor";
+    };
+
+    bind = mkOption {
+      type = types.str;
+      default = "";
+      example = literalExample "0.0.0.0";
+      description = "Address to bind to. The default is to bind to all addresses";
+    };
+
+    extraConfig = mkOption {
+      description = "These lines go into kapacitord.conf verbatim.";
+      default = "";
+      type = types.lines;
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "kapacitor";
+      description = "User account under which Kapacitor runs";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "kapacitor";
+      description = "Group under which Kapacitor runs";
+    };
+
+    taskSnapshotInterval = mkOption {
+      type = types.str;
+      description = "Specifies how often to snapshot the task state  (in InfluxDB time units)";
+      default = "1m0s";
+      example = "1m0s";
+    };
+
+    loadDirectory = mkOption {
+      type = types.nullOr types.path;
+      description = "Directory where to load services from, such as tasks, templates and handlers (or null to disable service loading on startup)";
+      default = null;
+    };
+
+    defaultDatabase = {
+      enable = mkEnableOption "kapacitor.defaultDatabase";
+
+      url = mkOption {
+        description = "The URL to an InfluxDB server that serves as the default database";
+        example = "http://localhost:8086";
+        type = types.str;
+      };
+
+      username = mkOption {
+        description = "The username to connect to the remote InfluxDB server";
+        type = types.str;
+      };
+
+      password = mkOption {
+        description = "The password to connect to the remote InfluxDB server";
+        type = types.string;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.kapacitor ];
+
+    systemd.services.kapacitor = {
+      description = "Kapacitor Real-Time Stream Processing Engine";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.kapacitor}/bin/kapacitord -config ${kapacitorConf}";
+        User = "kapacitor";
+        Group = "kapacitor";
+        PermissionsStartOnly = true;
+      };
+      preStart = ''
+        mkdir -p ${cfg.dataDir}
+        chown ${cfg.user}:${cfg.group} ${cfg.dataDir}
+      '';
+    };
+
+    users.users.kapacitor = {
+      uid = config.ids.uids.kapacitor;
+      description = "Kapacitor user";
+      home = cfg.dataDir;
+    };
+
+    users.groups.kapacitor = {
+      gid = config.ids.gids.kapacitor;
+    };
+  };
+}
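A usage sketch for the new module, pointing Kapacitor at a local InfluxDB on its default HTTP port (the credentials are placeholders; all option names are defined above):

  services.kapacitor = {
    enable = true;
    defaultDatabase = {
      enable = true;
      url = "http://localhost:8086";
      username = "kapacitor";
      password = "kapacitor-password";
    };
  };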
diff --git a/nixos/modules/services/monitoring/prometheus/default.nix b/nixos/modules/services/monitoring/prometheus/default.nix
index e2ee995cea80..bf4dfc666bb6 100644
--- a/nixos/modules/services/monitoring/prometheus/default.nix
+++ b/nixos/modules/services/monitoring/prometheus/default.nix
@@ -10,6 +10,13 @@ let
   # Get a submodule without any embedded metadata:
   _filter = x: filterAttrs (k: v: k != "_module") x;
 
+  # a wrapper that verifies that the configuration is valid
+  promtoolCheck = what: name: file: pkgs.runCommand "${name}-${what}-checked"
+    { buildInputs = [ cfg.package ]; } ''
+    ln -s ${file} $out
+    promtool ${what} $out
+  '';
+
   # Pretty-print JSON to a file
   writePrettyJSON = name: x:
     pkgs.runCommand name { } ''
@@ -19,18 +26,19 @@ let
   # This becomes the main config file
   promConfig = {
     global = cfg.globalConfig;
-    rule_files = cfg.ruleFiles ++ [
+    rule_files = map (promtoolCheck "check-rules" "rules") (cfg.ruleFiles ++ [
       (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
-    ];
+    ]);
     scrape_configs = cfg.scrapeConfigs;
   };
 
   generatedPrometheusYml = writePrettyJSON "prometheus.yml" promConfig;
 
-  prometheusYml =
-    if cfg.configText != null then
+  prometheusYml = let
+    yml = if cfg.configText != null then
       pkgs.writeText "prometheus.yml" cfg.configText
-    else generatedPrometheusYml;
+      else generatedPrometheusYml;
+    in promtoolCheck "check-config" "prometheus.yml" yml;
 
   cmdlineArgs = cfg.extraFlags ++ [
     "-storage.local.path=${cfg.dataDir}/metrics"
@@ -376,6 +384,15 @@ in {
         '';
       };
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.prometheus;
+        defaultText = "pkgs.prometheus";
+        description = ''
+          The prometheus package that should be used.
+        '';
+      };
+
       listenAddress = mkOption {
         type = types.str;
         default = "0.0.0.0:9090";
@@ -495,7 +512,7 @@ in {
       after    = [ "network.target" ];
       script = ''
         #!/bin/sh
-        exec ${pkgs.prometheus}/bin/prometheus \
+        exec ${cfg.package}/bin/prometheus \
           ${concatStringsSep " \\\n  " cmdlineArgs}
       '';
       serviceConfig = {
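With promtoolCheck in place, both ruleFiles and the inline rules are validated by promtool at build time, so a malformed rule now fails the system build instead of crashing the running service. A sketch using the Prometheus 1.x rule syntax that matches this module's -storage.local.* flags (enable and rules are the module's existing options):

  services.prometheus = {
    enable = true;
    rules = [
      ''
        ALERT InstanceDown
          IF up == 0
          FOR 5m
      ''
    ];
  };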
diff --git a/nixos/modules/services/networking/bitlbee.nix b/nixos/modules/services/networking/bitlbee.nix
index 46e3b7457610..274b36171608 100644
--- a/nixos/modules/services/networking/bitlbee.nix
+++ b/nixos/modules/services/networking/bitlbee.nix
@@ -33,7 +33,7 @@ let
 
   purple_plugin_path =
     lib.concatMapStringsSep ":"
-      (plugin: "${plugin}/lib/pidgin/")
+      (plugin: "${plugin}/lib/pidgin/:${plugin}/lib/purple-2/")
       cfg.libpurple_plugins
     ;
 
diff --git a/nixos/modules/services/networking/chrony.nix b/nixos/modules/services/networking/chrony.nix
index a363b545d649..9b8005e706ae 100644
--- a/nixos/modules/services/networking/chrony.nix
+++ b/nixos/modules/services/networking/chrony.nix
@@ -93,6 +93,8 @@ in
 
     services.timesyncd.enable = mkForce false;
 
+    systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "chronyd.service"; };
+
     systemd.services.chronyd =
       { description = "chrony NTP daemon";
 
diff --git a/nixos/modules/services/networking/consul.nix b/nixos/modules/services/networking/consul.nix
index ab3f81037681..0e90fed788b9 100644
--- a/nixos/modules/services/networking/consul.nix
+++ b/nixos/modules/services/networking/consul.nix
@@ -6,9 +6,10 @@ let
   dataDir = "/var/lib/consul";
   cfg = config.services.consul;
 
-  configOptions = { data_dir = dataDir; } //
-    (if cfg.webUi then { ui_dir = "${cfg.package.ui}"; } else { }) //
-    cfg.extraConfig;
+  configOptions = {
+    data_dir = dataDir;
+    ui = cfg.webUi;
+  } // cfg.extraConfig;
 
   configFiles = [ "/etc/consul.json" "/etc/consul-addrs.json" ]
     ++ cfg.extraConfigFiles;
diff --git a/nixos/modules/services/networking/ntpd.nix b/nixos/modules/services/networking/ntpd.nix
index 342350d49ab3..32174100b0f7 100644
--- a/nixos/modules/services/networking/ntpd.nix
+++ b/nixos/modules/services/networking/ntpd.nix
@@ -67,6 +67,8 @@ in
     environment.systemPackages = [ pkgs.ntp ];
     services.timesyncd.enable = mkForce false;
 
+    systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "ntpd.service"; };
+
     users.users = singleton
       { name = ntpUser;
         uid = config.ids.uids.ntp;
diff --git a/nixos/modules/services/networking/redsocks.nix b/nixos/modules/services/networking/redsocks.nix
index a47a78f1005e..8481f9debf39 100644
--- a/nixos/modules/services/networking/redsocks.nix
+++ b/nixos/modules/services/networking/redsocks.nix
@@ -267,4 +267,6 @@ in
             "ip46tables -t nat -D OUTPUT -p tcp ${redCond block} -j ${chain} 2>/dev/null || true"
         ) cfg.redsocks;
     };
+
+  meta.maintainers = with lib.maintainers; [ ekleog ];
 }
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index fd31b2a67687..b2ef1885a955 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -63,8 +63,20 @@ in {
         type = types.path;
         default = "/var/lib/syncthing";
         description = ''
+          Path where synced directories will exist.
+        '';
+      };
+
+      configDir = mkOption {
+        type = types.path;
+        description = ''
           Path where the settings and keys will exist.
         '';
+        default =
+          let
+            nixos = config.system.stateVersion;
+            cond  = versionAtLeast nixos "19.03";
+          in cfg.dataDir + (optionalString cond "/.config/syncthing");
       };
 
       openDefaultPorts = mkOption {
@@ -144,7 +156,7 @@ in {
             ${cfg.package}/bin/syncthing \
               -no-browser \
               -gui-address=${cfg.guiAddress} \
-              -home=${cfg.dataDir}
+              -home=${cfg.configDir}
           '';
         };
       };
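With the new configDir option, synced data and Syncthing's keys/settings can live in different places; for stateVersion >= 19.03 the default configDir moves under dataDir/.config/syncthing. A sketch (enable is the module's existing switch; the paths are placeholders):

  services.syncthing = {
    enable = true;
    dataDir = "/data/sync";
    configDir = "/var/lib/syncthing-config";
  };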
diff --git a/nixos/modules/services/networking/zerotierone.nix b/nixos/modules/services/networking/zerotierone.nix
index a4cd368397e7..764af3846fe5 100644
--- a/nixos/modules/services/networking/zerotierone.nix
+++ b/nixos/modules/services/networking/zerotierone.nix
@@ -39,7 +39,8 @@ in
     systemd.services.zerotierone = {
       description = "ZeroTierOne";
       path = [ cfg.package ];
-      after = [ "network.target" ];
+      bindsTo = [ "network-online.target" ];
+      after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       preStart = ''
         mkdir -p /var/lib/zerotier-one/networks.d
diff --git a/nixos/modules/services/search/solr.nix b/nixos/modules/services/search/solr.nix
index 90140a337ed8..7200c40e89f7 100644
--- a/nixos/modules/services/search/solr.nix
+++ b/nixos/modules/services/search/solr.nix
@@ -6,142 +6,105 @@ let
 
   cfg = config.services.solr;
 
-  # Assemble all jars needed for solr
-  solrJars = pkgs.stdenv.mkDerivation {
-    name = "solr-jars";
-
-    src = pkgs.fetchurl {
-      url = http://archive.apache.org/dist/tomcat/tomcat-5/v5.5.36/bin/apache-tomcat-5.5.36.tar.gz;
-      sha256 = "01mzvh53wrs1p2ym765jwd00gl6kn8f9k3nhdrnhdqr8dhimfb2p";
-    };
-
-    installPhase = ''
-      mkdir -p $out/lib
-      cp common/lib/*.jar $out/lib/
-      ln -s ${pkgs.ant}/lib/ant/lib/ant.jar $out/lib/
-      ln -s ${cfg.solrPackage}/lib/ext/* $out/lib/
-      ln -s ${pkgs.jdk.home}/lib/tools.jar $out/lib/
-    '' + optionalString (cfg.extraJars != []) ''
-      for f in ${concatStringsSep " " cfg.extraJars}; do
-         cp $f $out/lib
-      done
-    '';
-  };
-
-in {
+in
 
+{
   options = {
     services.solr = {
-      enable = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Enables the solr service.
-        '';
-      };
-
-      javaPackage = mkOption {
-        type = types.package;
-        default = pkgs.jre;
-        defaultText = "pkgs.jre";
-        description = ''
-          Which Java derivation to use for running solr.
-        '';
-      };
+      enable = mkEnableOption "Solr";
 
-      solrPackage = mkOption {
+      package = mkOption {
         type = types.package;
         default = pkgs.solr;
         defaultText = "pkgs.solr";
-        description = ''
-          Which solr derivation to use for running solr.
-        '';
+        description = "Which Solr package to use.";
       };
 
-      extraJars = mkOption {
-        type = types.listOf types.path;
-        default = [];
-        description = ''
-          List of paths pointing to jars. Jars are copied to commonLibFolder to be available to java/solr.
-        '';
+      port = mkOption {
+        type = types.int;
+        default = 8983;
+        description = "Port on which Solr is ran.";
       };
 
-      log4jConfiguration = mkOption {
-        type = types.lines;
-        default = ''
-          log4j.rootLogger=INFO, stdout
-          log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-          log4j.appender.stdout.Target=System.out
-          log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-          log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
-        '';
-        description = ''
-          Contents of the <literal>log4j.properties</literal> used. By default,
-          everything is logged to stdout (picked up by systemd) with level INFO.
-        '';
+      stateDir = mkOption {
+        type = types.path;
+        default = "/var/lib/solr";
+        description = "The solr home directory containing config, data, and logging files.";
       };
 
-      user = mkOption {
-        type = types.str;
-        description = ''
-          The user that should run the solr process and.
-          the working directories.
-        '';
+      extraJavaOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = "Extra command line options given to the java process running Solr.";
       };
 
-      group = mkOption {
+      user = mkOption {
         type = types.str;
-        description = ''
-          The group that will own the working directory.
-        '';
+        default = "solr";
+        description = "User under which Solr is ran.";
       };
 
-      solrHome = mkOption {
+      group = mkOption {
         type = types.str;
-        description = ''
-          The solr home directory. It is your own responsibility to
-          make sure this directory contains a working solr configuration,
-          and is writeable by the the user running the solr service.
-          Failing to do so, the solr will not start properly.
-        '';
-      };
-
-      extraJavaOptions = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = ''
-          Extra command line options given to the java process running
-          solr.
-        '';
-      };
-
-      extraWinstoneOptions = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = ''
-          Extra command line options given to the Winstone, which is
-          the servlet container hosting solr.
-        '';
+        default = "solr";
+        description = "Group under which Solr is ran.";
       };
     };
   };
 
   config = mkIf cfg.enable {
 
-    services.winstone.solr = {
-      serviceName = "solr";
-      inherit (cfg) user group javaPackage;
-      warFile = "${cfg.solrPackage}/lib/solr.war";
-      extraOptions = [
-        "--commonLibFolder=${solrJars}/lib"
-        "--useJasper"
-      ] ++ cfg.extraWinstoneOptions;
-      extraJavaOptions = [
-        "-Dsolr.solr.home=${cfg.solrHome}"
-        "-Dlog4j.configuration=file://${pkgs.writeText "log4j.properties" cfg.log4jConfiguration}"
-      ] ++ cfg.extraJavaOptions;
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.solr = {
+      after = [ "network.target" "remote-fs.target" "nss-lookup.target" "systemd-journald-dev-log.socket" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        SOLR_HOME = "${cfg.stateDir}/data";
+        LOG4J_PROPS = "${cfg.stateDir}/log4j2.xml";
+        SOLR_LOGS_DIR = "${cfg.stateDir}/logs";
+        SOLR_PORT = "${toString cfg.port}";
+      };
+      path = with pkgs; [
+        gawk
+        procps
+      ];
+      preStart = ''
+        mkdir -p "${cfg.stateDir}/data";
+        mkdir -p "${cfg.stateDir}/logs";
+
+        if ! test -e "${cfg.stateDir}/data/solr.xml"; then
+          install -D -m0640 ${cfg.package}/server/solr/solr.xml "${cfg.stateDir}/data/solr.xml"
+          install -D -m0640 ${cfg.package}/server/solr/zoo.cfg "${cfg.stateDir}/data/zoo.cfg"
+        fi
+
+        if ! test -e "${cfg.stateDir}/log4j2.xml"; then
+          install -D -m0640 ${cfg.package}/server/resources/log4j2.xml "${cfg.stateDir}/log4j2.xml"
+        fi
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart="${cfg.package}/bin/solr start -f -a \"${concatStringsSep " " cfg.extraJavaOptions}\"";
+        ExecStop="${cfg.package}/bin/solr stop";
+      };
     };
 
+    users.users = optionalAttrs (cfg.user == "solr") (singleton
+      { name = "solr";
+        group = cfg.group;
+        home = cfg.stateDir;
+        createHome = true;
+        uid = config.ids.uids.solr;
+      });
+
+    users.groups = optionalAttrs (cfg.group == "solr") (singleton
+      { name = "solr";
+        gid = config.ids.gids.solr;
+      });
+
   };
 
 }
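A usage sketch for the rewritten module, which now runs the bundled solr start script directly instead of deploying a WAR into Winstone (all option names are defined above; the heap size is a placeholder):

  services.solr = {
    enable = true;
    port = 8983;
    stateDir = "/var/lib/solr";
    extraJavaOptions = [ "-Xmx2g" ];
  };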
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 508398f03ace..6c733f093ba8 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -46,7 +46,7 @@ let
 
   configFile = pkgs.writeText "nginx.conf" ''
     user ${cfg.user} ${cfg.group};
-    error_log stderr;
+    error_log ${cfg.logError};
     daemon off;
 
     ${cfg.config}
@@ -341,6 +341,35 @@ in
         ";
       };
 
+      logError = mkOption {
+        default = "stderr";
+        description = "
+          Configures logging.
+          The first parameter defines a file that will store the log. The
+          special value stderr selects the standard error file. Logging to
+          syslog can be configured by specifying the “syslog:” prefix.
+          The second parameter determines the level of logging, and can be
+          one of the following: debug, info, notice, warn, error, crit,
+          alert, or emerg. Log levels above are listed in the order of
+          increasing severity. Setting a certain log level will cause all
+          messages of the specified and more severe log levels to be logged.
+          If this parameter is omitted then error is used.
+        ";
+      };
+
+      preStart =  mkOption {
+        type = types.lines;
+        default = ''
+          test -d ${cfg.stateDir}/logs || mkdir -m 750 -p ${cfg.stateDir}/logs
+          test `stat -c %a ${cfg.stateDir}` = "750" || chmod 750 ${cfg.stateDir}
+          test `stat -c %a ${cfg.stateDir}/logs` = "750" || chmod 750 ${cfg.stateDir}/logs
+          chown -R ${cfg.user}:${cfg.group} ${cfg.stateDir}
+        '';
+        description = "
+          Shell commands executed before the nginx service is started.
+        ";
+      };
+
       config = mkOption {
         default = "";
         description = "
@@ -608,9 +637,7 @@ in
       stopIfChanged = false;
       preStart =
         ''
-        mkdir -p ${cfg.stateDir}/logs
-        chmod 700 ${cfg.stateDir}
-        chown -R ${cfg.user}:${cfg.group} ${cfg.stateDir}
+        ${cfg.preStart}
         ${cfg.package}/bin/nginx -c ${configFile} -p ${cfg.stateDir} -t
         '';
       serviceConfig = {
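Since the generated nginx.conf now emits "error_log ${cfg.logError};" verbatim, logError accepts anything nginx's error_log directive does. A hedged example sending warnings and above to syslog (syntax as documented for error_log; adjust to taste):

  services.nginx.logError = "syslog:server=unix:/dev/log warn";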
diff --git a/nixos/modules/services/web-servers/tomcat.nix b/nixos/modules/services/web-servers/tomcat.nix
index be54e9255c78..68261c50324d 100644
--- a/nixos/modules/services/web-servers/tomcat.nix
+++ b/nixos/modules/services/web-servers/tomcat.nix
@@ -31,10 +31,26 @@ in
         '';
       };
 
+      purifyOnStart = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          On startup, the `baseDir` directory is populated with various files,
+          subdirectories and symlinks. If this option is enabled, these items
+          (except for the `logs` and `work` subdirectories) are first removed.
+          This prevents interference from remainders of an old configuration
+          (libraries, webapps, etc.), so it's recommended to enable this option.
+        '';
+      };
+
       baseDir = mkOption {
         type = lib.types.path;
         default = "/var/tomcat";
-        description = "Location where Tomcat stores configuration files, webapplications and logfiles";
+        description = ''
+          Location where Tomcat stores configuration files, web applications
+          and logfiles. Note that it is partially cleared on each service startup
+          if `purifyOnStart` is enabled.
+        '';
       };
 
       logDirs = mkOption {
@@ -197,6 +213,15 @@ in
       after = [ "network.target" ];
 
       preStart = ''
+        ${lib.optionalString cfg.purifyOnStart ''
+          # Delete most directories/symlinks we create from the existing base directory,
+          # to get rid of remainders of an old configuration.
+          # The list of directories to delete is taken from the "mkdir" command below,
+          # excluding "logs" (because logs are valuable) and "work" (because normally
+          # session files are there), and additionally including "bin".
+          rm -rf ${cfg.baseDir}/{conf,virtualhosts,temp,lib,shared/lib,webapps,bin}
+        ''}
+
         # Create the base directory
         mkdir -p \
           ${cfg.baseDir}/{conf,virtualhosts,logs,temp,lib,shared/lib,webapps,work}
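A sketch of the new option (services.tomcat.enable is the module's existing switch):

  services.tomcat = {
    enable = true;
    # Wipe conf, lib, webapps, bin, ... under baseDir on each start;
    # logs and work are kept.
    purifyOnStart = true;
  };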
diff --git a/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix b/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix
index 013956c05466..d1ee076e9185 100644
--- a/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix
@@ -22,7 +22,7 @@ let
       # This wrapper ensures that we actually get themes
       makeWrapper ${pkgs.lightdm_gtk_greeter}/sbin/lightdm-gtk-greeter \
         $out/greeter \
-        --prefix PATH : "${pkgs.glibc.bin}/bin" \
+        --prefix PATH : "${lib.getBin pkgs.stdenv.cc.libc}/bin" \
         --set GDK_PIXBUF_MODULE_FILE "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache" \
         --set GTK_PATH "${theme}:${pkgs.gtk3.out}" \
         --set GTK_EXE_PREFIX "${theme}" \
diff --git a/nixos/modules/system/activation/activation-script.nix b/nixos/modules/system/activation/activation-script.nix
index b1eaf0189562..74c150a848d1 100644
--- a/nixos/modules/system/activation/activation-script.nix
+++ b/nixos/modules/system/activation/activation-script.nix
@@ -21,7 +21,8 @@ let
     [ coreutils
       gnugrep
       findutils
-      glibc # needed for getent
+      getent
+      stdenv.cc.libc # nscd in update-users-groups.pl
       shadow
       nettools # needed for hostname
       utillinux # needed for mount and mountpoint
diff --git a/nixos/modules/system/boot/stage-1.nix b/nixos/modules/system/boot/stage-1.nix
index f4cf9753c0a1..e7167999a6f8 100644
--- a/nixos/modules/system/boot/stage-1.nix
+++ b/nixos/modules/system/boot/stage-1.nix
@@ -147,7 +147,7 @@ let
       ${config.boot.initrd.extraUtilsCommands}
 
       # Copy ld manually since it isn't detected correctly
-      cp -pv ${pkgs.glibc.out}/lib/ld*.so.? $out/lib
+      cp -pv ${pkgs.stdenv.cc.libc.out}/lib/ld*.so.? $out/lib
 
       # Copy all of the needed libraries
       find $out/bin $out/lib -type f | while read BIN; do
diff --git a/nixos/modules/system/boot/systemd-nspawn.nix b/nixos/modules/system/boot/systemd-nspawn.nix
index f4fa09694537..4f538ccdbbe1 100644
--- a/nixos/modules/system/boot/systemd-nspawn.nix
+++ b/nixos/modules/system/boot/systemd-nspawn.nix
@@ -112,6 +112,7 @@ in {
 
       environment.etc."systemd/nspawn".source = generateUnits "nspawn" units [] [];
 
+      systemd.targets."multi-user".wants = [ "machines.target" ];
   };
 
 }
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index 3d9fa53ce522..89f8e8153550 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -387,7 +387,7 @@ let
 
   logindHandlerType = types.enum [
     "ignore" "poweroff" "reboot" "halt" "kexec" "suspend"
-    "hibernate" "hybrid-sleep" "lock"
+    "hibernate" "hybrid-sleep" "suspend-then-hibernate" "lock"
   ];
 
 in
diff --git a/nixos/modules/tasks/filesystems.nix b/nixos/modules/tasks/filesystems.nix
index b3690fad1a6a..9e4057b50897 100644
--- a/nixos/modules/tasks/filesystems.nix
+++ b/nixos/modules/tasks/filesystems.nix
@@ -230,6 +230,8 @@ in
       let
         fsToSkipCheck = [ "none" "bindfs" "btrfs" "zfs" "tmpfs" "nfs" "vboxsf" "glusterfs" ];
         skipCheck = fs: fs.noCheck || fs.device == "none" || builtins.elem fs.fsType fsToSkipCheck;
+        # https://wiki.archlinux.org/index.php/fstab#Filepath_spaces
+        escape = string: builtins.replaceStrings [ " " ] [ "\\040" ] string;
       in ''
         # This is a generated file.  Do not edit!
         #
@@ -238,10 +240,10 @@ in
 
         # Filesystems.
         ${concatMapStrings (fs:
-            (if fs.device != null then fs.device
-             else if fs.label != null then "/dev/disk/by-label/${fs.label}"
+            (if fs.device != null then escape fs.device
+             else if fs.label != null then "/dev/disk/by-label/${escape fs.label}"
              else throw "No device specified for mount point ‘${fs.mountPoint}’.")
-            + " " + fs.mountPoint
+            + " " + escape fs.mountPoint
             + " " + fs.fsType
             + " " + builtins.concatStringsSep "," fs.options
             + " 0"
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index c92570582f20..9015200beead 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -53,7 +53,7 @@ let cfg = config.ec2; in
     # Mount all formatted ephemeral disks and activate all swap devices.
     # We cannot do this with the ‘fileSystems’ and ‘swapDevices’ options
     # because the set of devices is dependent on the instance type
-    # (e.g. "m1.large" has one ephemeral filesystem and one swap device,
+    # (e.g. "m1.small" has one ephemeral filesystem and one swap device,
     # while "m1.large" has two ephemeral filesystems and no swap
     # devices).  Also, put /tmp and /var on /disk0, since it has a lot
     # more space than the root device.  Similarly, "move" /nix to /disk0
diff --git a/nixos/modules/virtualisation/containers.nix b/nixos/modules/virtualisation/containers.nix
index 8fe59badd335..2fcc0f254256 100644
--- a/nixos/modules/virtualisation/containers.nix
+++ b/nixos/modules/virtualisation/containers.nix
@@ -243,6 +243,9 @@ let
 
     Restart = "on-failure";
 
+    Slice = "machine.slice";
+    Delegate = true;
+
     # Hack: we don't want to kill systemd-nspawn, since we call
     # "machinectl poweroff" in preStop to shut down the
     # container cleanly. But systemd requires sending a signal
@@ -606,7 +609,7 @@ in
               { config =
                   { config, pkgs, ... }:
                   { services.postgresql.enable = true;
-                    services.postgresql.package = pkgs.postgresql96;
+                    services.postgresql.package = pkgs.postgresql_9_6;
 
                     system.stateVersion = "17.03";
                   };
@@ -657,6 +660,8 @@ in
       serviceConfig = serviceDirectives dummyConfig;
     };
   in {
+    systemd.targets."multi-user".wants = [ "machines.target" ];
+
     systemd.services = listToAttrs (filter (x: x.value != null) (
       # The generic container template used by imperative containers
       [{ name = "container@"; value = unit; }]
@@ -680,7 +685,7 @@ in
           } // (
           if config.autoStart then
             {
-              wantedBy = [ "multi-user.target" ];
+              wantedBy = [ "machines.target" ];
               wants = [ "network.target" ];
               after = [ "network.target" ];
               restartTriggers = [ config.path ];
diff --git a/nixos/modules/virtualisation/docker-preloader.nix b/nixos/modules/virtualisation/docker-preloader.nix
new file mode 100644
index 000000000000..faa94f53d98f
--- /dev/null
+++ b/nixos/modules/virtualisation/docker-preloader.nix
@@ -0,0 +1,135 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+with builtins;
+
+let
+  cfg = config.virtualisation;
+
+  sanitizeImageName = image: replaceStrings ["/"] ["-"] image.imageName;
+  hash = drv: head (split "-" (baseNameOf drv.outPath));
+  # The label of an ext4 FS is limited to 16 bytes
+  labelFromImage = image: substring 0 16 (hash image);
+
+  # The Docker image is loaded and some files from /var/lib/docker/
+  # are written into a qcow image.
+  preload = image: pkgs.vmTools.runInLinuxVM (
+    pkgs.runCommand "docker-preload-image-${sanitizeImageName image}" {
+      buildInputs = with pkgs; [ docker e2fsprogs utillinux curl kmod ];
+      preVM = pkgs.vmTools.createEmptyImage {
+        size = cfg.dockerPreloader.qcowSize;
+        fullName = "docker-daemon-image.qcow2";
+      };
+    }
+    ''
+      mkfs.ext4 /dev/vda
+      e2label /dev/vda ${labelFromImage image}
+      mkdir -p /var/lib/docker
+      mount -t ext4 /dev/vda /var/lib/docker
+
+      modprobe overlay
+
+      # from https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
+      mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
+      cd /sys/fs/cgroup
+      for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
+        mkdir -p $sys
+        if ! mountpoint -q $sys; then
+          if ! mount -n -t cgroup -o $sys cgroup $sys; then
+            rmdir $sys || true
+          fi
+        fi
+      done
+
+      dockerd -H tcp://127.0.0.1:5555 -H unix:///var/run/docker.sock &
+
+      until $(curl --output /dev/null --silent --connect-timeout 2 http://127.0.0.1:5555); do
+        printf '.'
+        sleep 1
+      done
+
+      docker load -i ${image}
+
+      kill %1
+      find /var/lib/docker/ -maxdepth 1 -mindepth 1 -not -name "image" -not -name "overlay2" | xargs rm -rf
+    '');
+
+  preloadedImages = map preload cfg.dockerPreloader.images;
+
+in
+
+{
+  options.virtualisation.dockerPreloader = {
+    images = mkOption {
+      default = [ ];
+      type = types.listOf types.package;
+      description =
+      ''
+        A list of Docker images to preload (in the /var/lib/docker directory).
+      '';
+    };
+    qcowSize = mkOption {
+      default = 1024;
+      type = types.int;
+      description =
+      ''
+        The size (in MB) of each qcow file.
+      '';
+    };
+  };
+
+  config = {
+    assertions = [{
+      # If docker.storageDriver is null, Docker choose the storage
+      # driver. So, in this case, we cannot be sure overlay2 is used.
+      assertion = cfg.dockerPreloader.images == []
+        || cfg.docker.storageDriver == "overlay2"
+        || cfg.docker.storageDriver == "overlay"
+        || cfg.docker.storageDriver == null;
+      message = "The Docker image preloader only works with the overlay2 storage driver!";
+    }];
+
+    virtualisation.qemu.options =
+      map (path: "-drive if=virtio,file=${path}/disk-image.qcow2,readonly,media=cdrom,format=qcow2")
+      preloadedImages;
+
+
+    # All attached QCOW files are mounted and their contents are linked
+    # to /var/lib/docker/ in order to make the images available.
+    systemd.services.docker-preloader = {
+      description = "Preloaded Docker images";
+      wantedBy = ["docker.service"];
+      after = ["network.target"];
+      path = with pkgs; [ mount rsync jq ];
+      script = ''
+        mkdir -p /var/lib/docker/overlay2/l /var/lib/docker/image/overlay2
+        echo '{}' > /tmp/repositories.json
+
+        for i in ${concatStringsSep " " (map labelFromImage cfg.dockerPreloader.images)}; do
+          mkdir -p /mnt/docker-images/$i
+
+          # The ext4 label is limited to 16 bytes
+          mount /dev/disk/by-label/$(echo $i | cut -c1-16) -o ro,noload /mnt/docker-images/$i
+
+          find /mnt/docker-images/$i/overlay2/ -maxdepth 1 -mindepth 1 -not -name l\
+             -exec ln -s '{}' /var/lib/docker/overlay2/ \;
+          cp -P /mnt/docker-images/$i/overlay2/l/* /var/lib/docker/overlay2/l/
+
+          rsync -a /mnt/docker-images/$i/image/ /var/lib/docker/image/
+
+          # Accumulate image definitions
+          cp /tmp/repositories.json /tmp/repositories.json.tmp
+          jq -s '.[0] * .[1]' \
+            /tmp/repositories.json.tmp \
+            /mnt/docker-images/$i/image/overlay2/repositories.json \
+            > /tmp/repositories.json
+        done
+
+        mv /tmp/repositories.json /var/lib/docker/image/overlay2/repositories.json
+      '';
+      serviceConfig = {
+        Type = "oneshot";
+      };
+    };
+  };
+}
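A usage sketch for the new preloader, meant for QEMU-based NixOS VMs and tests where pulling images at runtime is undesirable (the image definition is only illustrative; virtualisation.docker.enable is the existing Docker option):

  { pkgs, ... }:
  {
    virtualisation.docker.enable = true;
    virtualisation.dockerPreloader = {
      qcowSize = 2048;
      images = [
        (pkgs.dockerTools.buildImage {
          name = "hello";
          contents = [ pkgs.hello ];
          config.Cmd = [ "/bin/hello" ];
        })
      ];
    };
  }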
diff --git a/nixos/modules/virtualisation/google-compute-image.nix b/nixos/modules/virtualisation/google-compute-image.nix
index caaf6c0aa59d..795858e5eae2 100644
--- a/nixos/modules/virtualisation/google-compute-image.nix
+++ b/nixos/modules/virtualisation/google-compute-image.nix
@@ -144,7 +144,6 @@ in
     path = with pkgs; [ iproute ];
     serviceConfig = {
       ExecStart = "${gce}/bin/google_network_daemon --debug";
-      Type = "oneshot";
     };
   };
 
diff --git a/nixos/modules/virtualisation/libvirtd.nix b/nixos/modules/virtualisation/libvirtd.nix
index 3e38662f5b0f..f4d7af1664af 100644
--- a/nixos/modules/virtualisation/libvirtd.nix
+++ b/nixos/modules/virtualisation/libvirtd.nix
@@ -196,6 +196,8 @@ in {
       wantedBy = [ "multi-user.target" ];
       path = with pkgs; [ coreutils libvirt gawk ];
       restartIfChanged = false;
+
+      environment.ON_SHUTDOWN = cfg.onShutdown;
     };
 
     systemd.sockets.virtlogd = {
diff --git a/nixos/modules/virtualisation/parallels-guest.nix b/nixos/modules/virtualisation/parallels-guest.nix
index 36ca7f356d44..4e0f2cae299e 100644
--- a/nixos/modules/virtualisation/parallels-guest.nix
+++ b/nixos/modules/virtualisation/parallels-guest.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, pkgs_i686, ... }:
+{ config, lib, pkgs, ... }:
 
 with lib;
 
@@ -64,7 +64,7 @@ in
     };
 
     hardware.opengl.package = prl-tools;
-    hardware.opengl.package32 = pkgs_i686.linuxPackages.prl-tools.override { libsOnly = true; kernel = null; };
+    hardware.opengl.package32 = pkgs.pkgsi686Linux.linuxPackages.prl-tools.override { libsOnly = true; kernel = null; };
 
     services.udev.packages = [ prl-tools ];
 
diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index 4e9c87222d0a..ed3431554be4 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -185,7 +185,10 @@ let
 in
 
 {
-  imports = [ ../profiles/qemu-guest.nix ];
+  imports = [
+    ../profiles/qemu-guest.nix
+    ./docker-preloader.nix
+  ];
 
   options = {
 
diff --git a/nixos/modules/virtualisation/virtualbox-image.nix b/nixos/modules/virtualisation/virtualbox-image.nix
index 60048911658c..037c0d2f0d82 100644
--- a/nixos/modules/virtualisation/virtualbox-image.nix
+++ b/nixos/modules/virtualisation/virtualbox-image.nix
@@ -12,7 +12,7 @@ in {
     virtualbox = {
       baseImageSize = mkOption {
         type = types.int;
-        default = 10 * 1024;
+        default = 50 * 1024;
         description = ''
           The size of the VirtualBox base image in MiB.
         '';
@@ -61,7 +61,7 @@ in {
           export HOME=$PWD
           export PATH=${pkgs.virtualbox}/bin:$PATH
 
-          echo "creating VirtualBox pass-through disk wrapper (no copying invovled)..."
+          echo "creating VirtualBox pass-through disk wrapper (no copying involved)..."
           VBoxManage internalcommands createrawvmdk -filename disk.vmdk -rawdisk $diskImage
 
           echo "creating VirtualBox VM..."
@@ -72,9 +72,9 @@ in {
             --memory ${toString cfg.memorySize} --acpi on --vram 32 \
             ${optionalString (pkgs.stdenv.hostPlatform.system == "i686-linux") "--pae on"} \
             --nictype1 virtio --nic1 nat \
-            --audiocontroller ac97 --audio alsa \
+            --audiocontroller ac97 --audio alsa --audioout on \
             --rtcuseutc on \
-            --usb on --mouse usbtablet
+            --usb on --usbehci on --mouse usbtablet
           VBoxManage storagectl "$vmName" --name SATA --add sata --portcount 4 --bootable on --hostiocache on
           VBoxManage storageattach "$vmName" --storagectl SATA --port 0 --device 0 --type hdd \
             --medium disk.vmdk
@@ -82,7 +82,7 @@ in {
           echo "exporting VirtualBox VM..."
           mkdir -p $out
           fn="$out/${cfg.vmFileName}"
-          VBoxManage export "$vmName" --output "$fn"
+          VBoxManage export "$vmName" --output "$fn" --options manifest
 
           rm -v $diskImage