Diffstat (limited to 'nixos/modules')
-rw-r--r--  nixos/modules/config/no-x-libs.nix | 22
-rw-r--r--  nixos/modules/hardware/brightnessctl.nix | 30
-rw-r--r--  nixos/modules/hardware/video/uvcvideo/default.nix | 64
-rw-r--r--  nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix | 46
-rw-r--r--  nixos/modules/i18n/input-method/default.xml | 12
-rw-r--r--  nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix | 25
-rw-r--r--  nixos/modules/installer/scan/not-detected.nix | 9
-rw-r--r--  nixos/modules/installer/tools/nix-fallback-paths.nix | 8
-rw-r--r--  nixos/modules/misc/ids.nix | 12
-rw-r--r--  nixos/modules/module-list.nix | 11
-rw-r--r--  nixos/modules/profiles/base.nix | 1
-rw-r--r--  nixos/modules/programs/environment.nix | 2
-rw-r--r--  nixos/modules/programs/mosh.nix | 18
-rw-r--r--  nixos/modules/programs/npm.nix | 4
-rw-r--r--  nixos/modules/programs/screen.nix | 4
-rw-r--r--  nixos/modules/programs/shell.nix | 36
-rw-r--r--  nixos/modules/programs/thefuck.nix | 4
-rw-r--r--  nixos/modules/programs/zsh/zsh-autosuggestions.nix | 60
-rw-r--r--  nixos/modules/programs/zsh/zsh.nix | 15
-rw-r--r--  nixos/modules/rename.nix | 12
-rw-r--r--  nixos/modules/services/admin/oxidized.nix | 116
-rw-r--r--  nixos/modules/services/audio/mpd.nix | 9
-rw-r--r--  nixos/modules/services/audio/slimserver.nix | 3
-rw-r--r--  nixos/modules/services/backup/borgbackup.nix | 50
-rw-r--r--  nixos/modules/services/backup/duplicati.nix | 19
-rw-r--r--  nixos/modules/services/backup/postgresql-backup.nix | 62
-rw-r--r--  nixos/modules/services/cluster/kubernetes/dashboard.nix | 240
-rw-r--r--  nixos/modules/services/cluster/kubernetes/default.nix | 127
-rw-r--r--  nixos/modules/services/cluster/kubernetes/dns.nix | 165
-rw-r--r--  nixos/modules/services/computing/slurm/slurm.nix | 92
-rw-r--r--  nixos/modules/services/databases/mysql.nix | 22
-rw-r--r--  nixos/modules/services/databases/pgmanage.nix | 68
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 5
-rw-r--r--  nixos/modules/services/desktops/flatpak.nix | 10
-rw-r--r--  nixos/modules/services/development/bloop.nix | 37
-rw-r--r--  nixos/modules/services/logging/journaldriver.nix | 112
-rw-r--r--  nixos/modules/services/mail/exim.nix | 1
-rw-r--r--  nixos/modules/services/mail/opensmtpd.nix | 13
-rw-r--r--  nixos/modules/services/misc/docker-registry.nix | 5
-rw-r--r--  nixos/modules/services/misc/gitea.nix | 11
-rw-r--r--  nixos/modules/services/misc/gitlab.nix | 13
-rw-r--r--  nixos/modules/services/misc/home-assistant.nix | 8
-rw-r--r--  nixos/modules/services/misc/nix-daemon.nix | 27
-rw-r--r--  nixos/modules/services/misc/nixos-manual.nix | 2
-rw-r--r--  nixos/modules/services/misc/xmr-stak.nix | 60
-rw-r--r--  nixos/modules/services/monitoring/dd-agent/dd-agent.nix | 12
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix | 39
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/node.nix | 1
-rw-r--r--  nixos/modules/services/network-filesystems/beegfs.nix | 2
-rw-r--r--  nixos/modules/services/network-filesystems/ipfs.nix | 16
-rw-r--r--  nixos/modules/services/networking/bind.nix | 10
-rw-r--r--  nixos/modules/services/networking/chrony.nix | 2
-rw-r--r--  nixos/modules/services/networking/dnscrypt-proxy.nix | 3
-rw-r--r--  nixos/modules/services/networking/dnsdist.nix | 61
-rw-r--r--  nixos/modules/services/networking/morty.nix | 98
-rw-r--r--  nixos/modules/services/networking/nat.nix | 20
-rw-r--r--  nixos/modules/services/networking/networkmanager.nix | 10
-rw-r--r--  nixos/modules/services/networking/openntpd.nix | 10
-rw-r--r--  nixos/modules/services/networking/owamp.nix | 47
-rw-r--r--  nixos/modules/services/networking/ssh/sshd.nix | 42
-rw-r--r--  nixos/modules/services/networking/sslh.nix | 114
-rw-r--r--  nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix | 7
-rw-r--r--  nixos/modules/services/networking/tinc.nix | 10
-rw-r--r--  nixos/modules/services/networking/unbound.nix | 8
-rw-r--r--  nixos/modules/services/networking/xrdp.nix | 1
-rw-r--r--  nixos/modules/services/scheduling/fcron.nix | 1
-rw-r--r--  nixos/modules/services/security/munge.nix | 16
-rw-r--r--  nixos/modules/services/security/oauth2_proxy.nix | 1
-rw-r--r--  nixos/modules/services/security/tor.nix | 51
-rw-r--r--  nixos/modules/services/web-apps/atlassian/crowd.nix | 5
-rw-r--r--  nixos/modules/services/web-apps/mattermost.nix | 8
-rw-r--r--  nixos/modules/services/web-apps/nexus.nix | 16
-rw-r--r--  nixos/modules/services/web-apps/tt-rss.nix | 40
-rw-r--r--  nixos/modules/services/web-apps/virtlyst.nix | 72
-rw-r--r--  nixos/modules/services/web-servers/meguca.nix | 158
-rw-r--r--  nixos/modules/services/web-servers/minio.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/tomcat.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/uwsgi.nix | 8
-rw-r--r--  nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix | 100
-rw-r--r--  nixos/modules/services/x11/display-managers/lightdm.nix | 1
-rw-r--r--  nixos/modules/services/x11/display-managers/sddm.nix | 15
-rw-r--r--  nixos/modules/services/x11/window-managers/awesome.nix | 7
-rw-r--r--  nixos/modules/services/x11/xserver.nix | 7
-rw-r--r--  nixos/modules/system/boot/initrd-network.nix | 1
-rw-r--r--  nixos/modules/system/boot/loader/grub/grub.nix | 28
-rw-r--r--  nixos/modules/system/boot/loader/grub/install-grub.pl | 42
-rw-r--r--  nixos/modules/system/boot/stage-1-init.sh | 3
-rw-r--r--  nixos/modules/system/boot/stage-1.nix | 6
-rw-r--r--  nixos/modules/system/boot/systemd-lib.nix | 14
-rw-r--r--  nixos/modules/system/boot/timesyncd.nix | 2
-rw-r--r--  nixos/modules/tasks/filesystems/zfs.nix | 24
-rw-r--r--  nixos/modules/tasks/network-interfaces-scripted.nix | 2
-rw-r--r--  nixos/modules/tasks/network-interfaces.nix | 16
-rw-r--r--  nixos/modules/tasks/scsi-link-power-management.nix | 26
-rw-r--r--  nixos/modules/tasks/swraid.nix | 2
-rw-r--r--  nixos/modules/virtualisation/azure-image.nix | 4
-rw-r--r--  nixos/modules/virtualisation/azure-qemu-220-no-etc-install.patch | 14
-rw-r--r--  nixos/modules/virtualisation/gce-images.nix | 3
-rw-r--r--  nixos/modules/virtualisation/google-compute-image.nix | 2
-rw-r--r--  nixos/modules/virtualisation/kvmgt.nix | 64
101 files changed, 2427 insertions, 522 deletions
diff --git a/nixos/modules/config/no-x-libs.nix b/nixos/modules/config/no-x-libs.nix
index a20910353f34..c7a6c943bc27 100644
--- a/nixos/modules/config/no-x-libs.nix
+++ b/nixos/modules/config/no-x-libs.nix
@@ -26,16 +26,16 @@ with lib;
 
     fonts.fontconfig.enable = false;
 
-    nixpkgs.config.packageOverrides = pkgs: {
-      dbus = pkgs.dbus.override { x11Support = false; };
-      networkmanager-fortisslvpn = pkgs.networkmanager-fortisslvpn.override { withGnome = false; };
-      networkmanager-l2tp = pkgs.networkmanager-l2tp.override { withGnome = false; };
-      networkmanager-openconnect = pkgs.networkmanager-openconnect.override { withGnome = false; };
-      networkmanager-openvpn = pkgs.networkmanager-openvpn.override { withGnome = false; };
-      networkmanager-vpnc = pkgs.networkmanager-vpnc.override { withGnome = false; };
-      networkmanager-iodine = pkgs.networkmanager-iodine.override { withGnome = false; };
-      pinentry = pkgs.pinentry_ncurses;
-      gobjectIntrospection = pkgs.gobjectIntrospection.override { x11Support = false; };
-    };
+    nixpkgs.overlays = singleton (const (super: {
+      dbus = super.dbus.override { x11Support = false; };
+      networkmanager-fortisslvpn = super.networkmanager-fortisslvpn.override { withGnome = false; };
+      networkmanager-l2tp = super.networkmanager-l2tp.override { withGnome = false; };
+      networkmanager-openconnect = super.networkmanager-openconnect.override { withGnome = false; };
+      networkmanager-openvpn = super.networkmanager-openvpn.override { withGnome = false; };
+      networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; };
+      networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
+      pinentry = super.pinentry_ncurses;
+      gobjectIntrospection = super.gobjectIntrospection.override { x11Support = false; };
+    }));
   };
 }
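
The overlay above is equivalent to the removed packageOverrides block: lib.singleton wraps the overlay in a list, and lib.const discards the self argument that an overlay normally receives. A minimal sketch of the same pattern written out explicitly, using one of the packages from the diff:

  # Equivalent, more conventional spelling of `singleton (const (super: { ... }))`:
  nixpkgs.overlays = [
    (self: super: {
      # `self` is unused here, which is exactly what lib.const expresses above
      dbus = super.dbus.override { x11Support = false; };
    })
  ];
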
diff --git a/nixos/modules/hardware/brightnessctl.nix b/nixos/modules/hardware/brightnessctl.nix
new file mode 100644
index 000000000000..341e4b791c23
--- /dev/null
+++ b/nixos/modules/hardware/brightnessctl.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.hardware.brightnessctl;
+in
+{
+
+  options = {
+
+    hardware.brightnessctl = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Enable brightnessctl in userspace.
+          This will allow brightness control from users in the video group.
+        '';
+
+      };
+    };
+  };
+
+
+  config = mkIf cfg.enable {
+    services.udev.packages = with pkgs; [ brightnessctl ];
+  };
+
+}
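
A minimal usage sketch for the new module; the user name is hypothetical, and membership in the video group is what the option description says grants brightness control:

  hardware.brightnessctl.enable = true;
  # hypothetical user; the udev rules allow members of "video" to change brightness
  users.users.alice.extraGroups = [ "video" ];
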
diff --git a/nixos/modules/hardware/video/uvcvideo/default.nix b/nixos/modules/hardware/video/uvcvideo/default.nix
new file mode 100644
index 000000000000..7e3e94fdf2bd
--- /dev/null
+++ b/nixos/modules/hardware/video/uvcvideo/default.nix
@@ -0,0 +1,64 @@
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.uvcvideo;
+
+  uvcdynctrl-udev-rules = packages: pkgs.callPackage ./uvcdynctrl-udev-rules.nix {
+    drivers = packages;
+    udevDebug = false;
+  };
+
+in
+
+{
+
+  options = {
+    services.uvcvideo.dynctrl = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable <command>uvcvideo</command> dynamic controls.
+
+          Note that enabling this brings the <command>uvcdynctrl</command> tool
+          into your environement and register all dynamic controls from
+          specified <command>packages</command> to the <command>uvcvideo</command> driver.
+        '';
+      };
+
+      packages = mkOption {
+        type = types.listOf types.path;
+        example = literalExample "[ pkgs.tiscamera ]";
+        description = ''
+          List of packages containing <command>uvcvideo</command> dynamic controls
+          rules. All files found in
+          <filename><replaceable>pkg</replaceable>/share/uvcdynctrl/data</filename>
+          will be included.
+
+          Note that these will serve as input to the <command>libwebcam</command>
+          package which through its own <command>udev</command> rule will register
+          the dynamic controls from specified packages to the <command>uvcvideo</command>
+          driver.
+        '';
+        apply = map getBin;
+      };
+    };
+  };
+
+  config = mkIf cfg.dynctrl.enable {
+
+    services.udev.packages = [
+      (uvcdynctrl-udev-rules cfg.dynctrl.packages)
+    ];
+
+    environment.systemPackages = [
+      pkgs.libwebcam
+    ];
+
+  };
+}
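
A usage sketch for the new options, reusing the package from the module's own literalExample:

  services.uvcvideo.dynctrl = {
    enable = true;
    packages = [ pkgs.tiscamera ];  # rules under <pkg>/share/uvcdynctrl/data are picked up
  };
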
diff --git a/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix b/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix
new file mode 100644
index 000000000000..832e61966120
--- /dev/null
+++ b/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix
@@ -0,0 +1,46 @@
+{ lib
+, stdenv
+, buildEnv
+, libwebcam
+, makeWrapper
+, runCommand
+, drivers ? []
+, udevDebug ? false
+}:
+
+let
+  version = "0.0.0";
+
+  dataPath = buildEnv {
+    name = "uvcdynctrl-with-drivers-data-path";
+    paths = drivers ++ [ libwebcam ];
+    pathsToLink = [ "/share/uvcdynctrl/data" ];
+    ignoreCollisions = false;
+  };
+
+  dataDir = "${dataPath}/share/uvcdynctrl/data";
+  udevDebugVarValue = if udevDebug then "1" else "0";
+in
+
+runCommand "uvcdynctrl-udev-rules-${version}"
+{
+  inherit dataPath;
+  buildInputs = [
+    makeWrapper
+    libwebcam
+  ];
+  dontPatchELF = true;
+  dontStrip = true;
+}
+''
+  mkdir -p "$out/lib/udev"
+  makeWrapper "${libwebcam}/lib/udev/uvcdynctrl" "$out/lib/udev/uvcdynctrl" \
+    --set NIX_UVCDYNCTRL_DATA_DIR "${dataDir}" \
+    --set NIX_UVCDYNCTRL_UDEV_DEBUG "${udevDebugVarValue}"
+
+  mkdir -p "$out/lib/udev/rules.d"
+  cat "${libwebcam}/lib/udev/rules.d/80-uvcdynctrl.rules" | \
+    sed -r "s#RUN\+\=\"([^\"]+)\"#RUN\+\=\"$out/lib/udev/uvcdynctrl\"#g" > \
+    "$out/lib/udev/rules.d/80-uvcdynctrl.rules"
+''
+
diff --git a/nixos/modules/i18n/input-method/default.xml b/nixos/modules/i18n/input-method/default.xml
index 76ffa8cb7e37..eb75b7415c9c 100644
--- a/nixos/modules/i18n/input-method/default.xml
+++ b/nixos/modules/i18n/input-method/default.xml
@@ -68,6 +68,18 @@ ibus.engines = with pkgs.ibus-engines; [ table table-others ];
 <para>To use any input method, the package must be added in the configuration,
   as shown above, and also (after running <literal>nixos-rebuild</literal>) the
   input method must be added from IBus' preference dialog.</para>
+
+<simplesect>
+  <title>Troubleshooting</title>
+  <para>If IBus works in some applications but not others, a likely cause of
+  this is that IBus is depending on a different version of
+  <literal>glib</literal> to what the applications are depending on. This can
+  be checked by running <literal>nix-store -q --requisites &lt;path&gt; | grep
+  glib</literal>, where <literal>&lt;path&gt;</literal> is the path of either
+  IBus or an application in the Nix store. The <literal>glib</literal>
+  packages must match exactly. If they do not, uninstalling and reinstalling
+  the application is a likely fix.</para>
+</simplesect>
 </section>
 
 <section><title>Fcitx</title>
diff --git a/nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix b/nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix
index 212013b5e289..fe6cc4161630 100644
--- a/nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix
+++ b/nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix
@@ -31,11 +31,24 @@ in
   users.extraUsers.root.initialHashedPassword = "";
 
   sdImage = {
-    populateBootCommands = ''
-      (cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/boot/)
-      cp ${pkgs.ubootRaspberryPi}/u-boot.bin boot/u-boot-rpi.bin
-      echo 'kernel u-boot-rpi.bin' > boot/config.txt
-      ${extlinux-conf-builder} -t 3 -c ${config.system.build.toplevel} -d ./boot
-    '';
+    populateBootCommands = let
+      configTxt = pkgs.writeText "config.txt" ''
+        # Prevent the firmware from smashing the framebuffer setup done by the mainline kernel
+        # when attempting to show low-voltage or overtemperature warnings.
+        avoid_warnings=1
+
+        [pi0]
+        kernel=u-boot-rpi0.bin
+
+        [pi1]
+        kernel=u-boot-rpi1.bin
+      '';
+      in ''
+        (cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/boot/)
+        cp ${pkgs.ubootRaspberryPiZero}/u-boot.bin boot/u-boot-rpi0.bin
+        cp ${pkgs.ubootRaspberryPi}/u-boot.bin boot/u-boot-rpi1.bin
+        cp ${configTxt} boot/config.txt
+        ${extlinux-conf-builder} -t 3 -c ${config.system.build.toplevel} -d ./boot
+      '';
   };
 }
diff --git a/nixos/modules/installer/scan/not-detected.nix b/nixos/modules/installer/scan/not-detected.nix
index 903933e2df02..baa068c08dbf 100644
--- a/nixos/modules/installer/scan/not-detected.nix
+++ b/nixos/modules/installer/scan/not-detected.nix
@@ -1,9 +1,6 @@
-# List all devices which are _not_ detected by nixos-generate-config.
-# Common devices are enabled by default.
-{ config, lib, pkgs, ... }:
-
-with lib;
+# Enables non-free firmware on devices not recognized by `nixos-generate-config`.
+{ lib, ... }:
 
 {
-  hardware.enableRedistributableFirmware = true;
+  hardware.enableRedistributableFirmware = lib.mkDefault true;
 }
diff --git a/nixos/modules/installer/tools/nix-fallback-paths.nix b/nixos/modules/installer/tools/nix-fallback-paths.nix
index 5ad28ea9499e..7c5414257b46 100644
--- a/nixos/modules/installer/tools/nix-fallback-paths.nix
+++ b/nixos/modules/installer/tools/nix-fallback-paths.nix
@@ -1,6 +1,6 @@
 {
-  x86_64-linux = "/nix/store/z6avpvg24f6d1br2sr6qlphsq3h4d91v-nix-2.0.2";
-  i686-linux = "/nix/store/cdqjyb9srhwkc4gqbknnap7y31lws4yq-nix-2.0.2";
-  aarch64-linux = "/nix/store/fbgaa3fb2am30klwv4lls44njwqh487a-nix-2.0.2";
-  x86_64-darwin = "/nix/store/hs8mxsvdhm95dxgx943d74fws01j2zj3-nix-2.0.2";
+  x86_64-linux = "/nix/store/0d60i73mcv8z1m8d2m74yfn84980gfsa-nix-2.0.4";
+  i686-linux = "/nix/store/6ssafj2s5a2g9x28yld7b70vwd6vw6lb-nix-2.0.4";
+  aarch64-linux = "/nix/store/3wwch7bp7n7xsl8apgy2a4b16yzyij1z-nix-2.0.4";
+  x86_64-darwin = "/nix/store/771l8i0mz4c8kry8cz3sz8rr3alalckg-nix-2.0.4";
 }
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index cc7d86849824..73231edf077b 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -1,6 +1,14 @@
 # This module defines the global list of uids and gids.  We keep a
 # central list to prevent id collisions.
 
+# IMPORTANT!
+# We only add static uids and gids for services where it is not feasible
+# to change uids/gids on service start, for example a service with a lot of
+# files. Please also check if the service is applicable for systemd's
+# DynamicUser option and does not need a uid/gid allocation at all.
+# Systemd can also change ownership of service directories using the
+# RuntimeDirectory/StateDirectory options.
+
 { config, pkgs, lib, ... }:
 
 {
@@ -307,6 +315,8 @@
       duplicati = 289;
       monetdb = 290;
       restic = 291;
+      openvpn = 292;
+      meguca = 293;
 
       # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
 
@@ -582,6 +592,8 @@
       duplicati = 289;
       monetdb = 290;
       restic = 291;
+      openvpn = 292;
+      meguca = 293;
 
       # When adding a gid, make sure it doesn't match an existing
       # uid. Users and groups with the same name should have equal
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index d5cfd87520c5..8de41467ce12 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -29,6 +29,7 @@
   ./config/vpnc.nix
   ./config/zram.nix
   ./hardware/all-firmware.nix
+  ./hardware/brightnessctl.nix
   ./hardware/ckb.nix
   ./hardware/cpu/amd-microcode.nix
   ./hardware/cpu/intel-microcode.nix
@@ -50,6 +51,7 @@
   ./hardware/video/bumblebee.nix
   ./hardware/video/displaylink.nix
   ./hardware/video/nvidia.nix
+  ./hardware/video/uvcvideo/default.nix
   ./hardware/video/webcam/facetimehd.nix
   ./i18n/input-method/default.nix
   ./i18n/input-method/fcitx.nix
@@ -127,6 +129,7 @@
   ./programs/zsh/oh-my-zsh.nix
   ./programs/zsh/zsh.nix
   ./programs/zsh/zsh-autoenv.nix
+  ./programs/zsh/zsh-autosuggestions.nix
   ./programs/zsh/zsh-syntax-highlighting.nix
   ./rename.nix
   ./security/acme.nix
@@ -150,6 +153,7 @@
   ./security/rtkit.nix
   ./security/wrappers/default.nix
   ./security/sudo.nix
+  ./services/admin/oxidized.nix
   ./services/admin/salt/master.nix
   ./services/admin/salt/minion.nix
   ./services/amqp/activemq/default.nix
@@ -238,6 +242,7 @@
   ./services/desktops/gnome3/tracker-miners.nix
   ./services/desktops/profile-sync-daemon.nix
   ./services/desktops/telepathy.nix
+  ./services/development/bloop.nix
   ./services/development/hoogle.nix
   ./services/editors/emacs.nix
   ./services/editors/infinoted.nix
@@ -471,6 +476,7 @@
   ./services/networking/dnschain.nix
   ./services/networking/dnscrypt-proxy.nix
   ./services/networking/dnscrypt-wrapper.nix
+  ./services/networking/dnsdist.nix
   ./services/networking/dnsmasq.nix
   ./services/networking/ejabberd.nix
   ./services/networking/fakeroute.nix
@@ -513,6 +519,7 @@
   ./services/networking/miniupnpd.nix
   ./services/networking/mosquitto.nix
   ./services/networking/monero.nix
+  ./services/networking/morty.nix
   ./services/networking/miredo.nix
   ./services/networking/mstpd.nix
   ./services/networking/murmur.nix
@@ -535,6 +542,7 @@
   ./services/networking/openntpd.nix
   ./services/networking/openvpn.nix
   ./services/networking/ostinato.nix
+  ./services/networking/owamp.nix
   ./services/networking/pdnsd.nix
   ./services/networking/polipo.nix
   ./services/networking/powerdns.nix
@@ -655,6 +663,7 @@
   ./services/web-apps/tt-rss.nix
   ./services/web-apps/selfoss.nix
   ./services/web-apps/quassel-webserver.nix
+  ./services/web-apps/virtlyst.nix
   ./services/web-apps/youtrack.nix
   ./services/web-servers/apache-httpd/default.nix
   ./services/web-servers/caddy.nix
@@ -666,6 +675,7 @@
   ./services/web-servers/lighttpd/default.nix
   ./services/web-servers/lighttpd/gitweb.nix
   ./services/web-servers/lighttpd/inginious.nix
+  ./services/web-servers/meguca.nix
   ./services/web-servers/mighttpd2.nix
   ./services/web-servers/minio.nix
   ./services/web-servers/nginx/default.nix
@@ -785,6 +795,7 @@
   ./virtualisation/lxd.nix
   ./virtualisation/amazon-options.nix
   ./virtualisation/hyperv-guest.nix
+  ./virtualisation/kvmgt.nix
   ./virtualisation/openvswitch.nix
   ./virtualisation/parallels-guest.nix
   ./virtualisation/qemu-guest-agent.nix
diff --git a/nixos/modules/profiles/base.nix b/nixos/modules/profiles/base.nix
index 52481d90eab9..406a69722de6 100644
--- a/nixos/modules/profiles/base.nix
+++ b/nixos/modules/profiles/base.nix
@@ -29,7 +29,6 @@
     # Hardware-related tools.
     pkgs.sdparm
     pkgs.hdparm
-    pkgs.dmraid
     pkgs.smartmontools # for diagnosing hard disks
     pkgs.pciutils
     pkgs.usbutils
diff --git a/nixos/modules/programs/environment.nix b/nixos/modules/programs/environment.nix
index 401d152941a0..06ebb7bc729b 100644
--- a/nixos/modules/programs/environment.nix
+++ b/nixos/modules/programs/environment.nix
@@ -33,8 +33,6 @@ in
     environment.profileRelativeEnvVars =
       { PATH = [ "/bin" ];
         INFOPATH = [ "/info" "/share/info" ];
-        PKG_CONFIG_PATH = [ "/lib/pkgconfig" ];
-        PERL5LIB = [ "/lib/perl5/site_perl" ];
         KDEDIRS = [ "" ];
         STRIGI_PLUGIN_PATH = [ "/lib/strigi/" ];
         QT_PLUGIN_PATH = [ "/lib/qt4/plugins" "/lib/kde4/plugins" ];
diff --git a/nixos/modules/programs/mosh.nix b/nixos/modules/programs/mosh.nix
index b3aa55e189a3..359fe23e0ecd 100644
--- a/nixos/modules/programs/mosh.nix
+++ b/nixos/modules/programs/mosh.nix
@@ -16,10 +16,28 @@ in
       default = false;
       type = lib.types.bool;
     };
+    withUtempter = mkOption {
+      description = ''
+        Whether to enable libutempter for mosh.
+        This is required so that mosh can write to /var/run/utmp (which can be queried with `who` to display currently connected user sessions).
+        Note, this will add a setgid wrapper for the group utmp!
+      '';
+      default = true;
+      type = lib.types.bool;
+    };
   };
 
   config = mkIf cfg.enable {
     environment.systemPackages = with pkgs; [ mosh ];
     networking.firewall.allowedUDPPortRanges = [ { from = 60000; to = 61000; } ];
+    security.wrappers = mkIf cfg.withUtempter {
+      utempter = {
+        source = "${pkgs.libutempter}/lib/utempter/utempter";
+        owner = "nobody";
+        group = "utmp";
+        setuid = false;
+        setgid = true;
+      };
+    };
   };
 }
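
A usage sketch for the new option; withUtempter defaults to true and is spelled out only for clarity:

  programs.mosh = {
    enable = true;
    withUtempter = true;  # installs the setgid "utempter" wrapper so sessions appear in `who`
  };
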
diff --git a/nixos/modules/programs/npm.nix b/nixos/modules/programs/npm.nix
index 7ef172355c1f..5fdd4fa841a1 100644
--- a/nixos/modules/programs/npm.nix
+++ b/nixos/modules/programs/npm.nix
@@ -1,4 +1,4 @@
-{ config, lib, ... }:
+{ config, lib, pkgs, ... }:
 
 with lib;
 
@@ -39,6 +39,8 @@ in
     environment.etc."npmrc".text = cfg.npmrc;
 
     environment.variables.NPM_CONFIG_GLOBALCONFIG = "/etc/npmrc";
+
+    environment.systemPackages = [ pkgs.nodePackages.npm ];
   };
 
 }
diff --git a/nixos/modules/programs/screen.nix b/nixos/modules/programs/screen.nix
index f82338a69d25..c1daaa58f16f 100644
--- a/nixos/modules/programs/screen.nix
+++ b/nixos/modules/programs/screen.nix
@@ -1,4 +1,4 @@
-{ config, lib, ... }:
+{ config, lib, pkgs, ... }:
 
 let
   inherit (lib) mkOption mkIf types;
@@ -25,6 +25,8 @@ in
 
   config = mkIf (cfg.screenrc != "") {
     environment.etc."screenrc".text = cfg.screenrc;
+
+    environment.systemPackages = [ pkgs.screen ];
   };
 
 }
diff --git a/nixos/modules/programs/shell.nix b/nixos/modules/programs/shell.nix
index 3504a8a924b0..56fe347528bd 100644
--- a/nixos/modules/programs/shell.nix
+++ b/nixos/modules/programs/shell.nix
@@ -23,39 +23,39 @@ in
     environment.shellInit =
       ''
         # Set up the per-user profile.
-        mkdir -m 0755 -p $NIX_USER_PROFILE_DIR
-        if test "$(stat --printf '%u' $NIX_USER_PROFILE_DIR)" != "$(id -u)"; then
-            echo "WARNING: bad ownership on $NIX_USER_PROFILE_DIR" >&2
+        mkdir -m 0755 -p "$NIX_USER_PROFILE_DIR"
+        if [ "$(stat --printf '%u' "$NIX_USER_PROFILE_DIR")" != "$(id -u)" ]; then
+            echo "WARNING: bad ownership on $NIX_USER_PROFILE_DIR, should be $(id -u)" >&2
         fi
 
-        if test -w $HOME; then
-          if ! test -L $HOME/.nix-profile; then
-              if test "$USER" != root; then
-                  ln -s $NIX_USER_PROFILE_DIR/profile $HOME/.nix-profile
+        if [ -w "$HOME" ]; then
+          if ! [ -L "$HOME/.nix-profile" ]; then
+              if [ "$USER" != root ]; then
+                  ln -s "$NIX_USER_PROFILE_DIR/profile" "$HOME/.nix-profile"
               else
                   # Root installs in the system-wide profile by default.
-                  ln -s /nix/var/nix/profiles/default $HOME/.nix-profile
+                  ln -s /nix/var/nix/profiles/default "$HOME/.nix-profile"
               fi
           fi
 
           # Subscribe the root user to the NixOS channel by default.
-          if [ "$USER" = root -a ! -e $HOME/.nix-channels ]; then
-              echo "${config.system.nixos.defaultChannel} nixos" > $HOME/.nix-channels
+          if [ "$USER" = root -a ! -e "$HOME/.nix-channels" ]; then
+              echo "${config.system.nixos.defaultChannel} nixos" > "$HOME/.nix-channels"
           fi
 
           # Create the per-user garbage collector roots directory.
-          NIX_USER_GCROOTS_DIR=/nix/var/nix/gcroots/per-user/$USER
-          mkdir -m 0755 -p $NIX_USER_GCROOTS_DIR
-          if test "$(stat --printf '%u' $NIX_USER_GCROOTS_DIR)" != "$(id -u)"; then
-              echo "WARNING: bad ownership on $NIX_USER_GCROOTS_DIR" >&2
+          NIX_USER_GCROOTS_DIR="/nix/var/nix/gcroots/per-user/$USER"
+          mkdir -m 0755 -p "$NIX_USER_GCROOTS_DIR"
+          if [ "$(stat --printf '%u' "$NIX_USER_GCROOTS_DIR")" != "$(id -u)" ]; then
+              echo "WARNING: bad ownership on $NIX_USER_GCROOTS_DIR, should be $(id -u)" >&2
           fi
 
           # Set up a default Nix expression from which to install stuff.
-          if [ ! -e $HOME/.nix-defexpr -o -L $HOME/.nix-defexpr ]; then
-              rm -f $HOME/.nix-defexpr
-              mkdir -p $HOME/.nix-defexpr
+          if [ ! -e "$HOME/.nix-defexpr" -o -L "$HOME/.nix-defexpr" ]; then
+              rm -f "$HOME/.nix-defexpr"
+              mkdir -p "$HOME/.nix-defexpr"
               if [ "$USER" != root ]; then
-                  ln -s /nix/var/nix/profiles/per-user/root/channels $HOME/.nix-defexpr/channels_root
+                  ln -s /nix/var/nix/profiles/per-user/root/channels "$HOME/.nix-defexpr/channels_root"
               fi
           fi
         fi
diff --git a/nixos/modules/programs/thefuck.nix b/nixos/modules/programs/thefuck.nix
index eb913477cf05..f4ae52934760 100644
--- a/nixos/modules/programs/thefuck.nix
+++ b/nixos/modules/programs/thefuck.nix
@@ -31,8 +31,8 @@ in
       environment.systemPackages = with pkgs; [ thefuck ];
       environment.shellInit = initScript;
 
-      programs.zsh.shellInit = mkIf prg.zsh.enable initScript;
-      programs.fish.shellInit = mkIf prg.fish.enable ''
+      programs.zsh.interactiveShellInit = mkIf prg.zsh.enable initScript;
+      programs.fish.interactiveShellInit = mkIf prg.fish.enable ''
         ${pkgs.thefuck}/bin/thefuck --alias | source
       '';
     };
diff --git a/nixos/modules/programs/zsh/zsh-autosuggestions.nix b/nixos/modules/programs/zsh/zsh-autosuggestions.nix
new file mode 100644
index 000000000000..416f4c9c6751
--- /dev/null
+++ b/nixos/modules/programs/zsh/zsh-autosuggestions.nix
@@ -0,0 +1,60 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.zsh.autosuggestions;
+in
+{
+  options.programs.zsh.autosuggestions = {
+
+    enable = mkEnableOption "zsh-autosuggestions";
+
+    highlightStyle = mkOption {
+      type = types.str;
+      default = "fg=8"; # https://github.com/zsh-users/zsh-autosuggestions/tree/v0.4.3#suggestion-highlight-style
+      description = "Highlight style for suggestions ({fore,back}ground color)";
+      example = "fg=cyan";
+    };
+
+    strategy = mkOption {
+      type = types.enum [ "default" "match_prev_cmd" ];
+      default = "default";
+      description = ''
+        Set ZSH_AUTOSUGGEST_STRATEGY to choose the strategy for generating suggestions.
+        There are currently two to choose from:
+
+          * default: Chooses the most recent match.
+          * match_prev_cmd: Chooses the most recent match whose preceding history item matches
+            the most recently executed command (more info). Note that this strategy won't work as
+            expected with ZSH options that don't preserve the history order such as
+            HIST_IGNORE_ALL_DUPS or HIST_EXPIRE_DUPS_FIRST.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = with types; attrsOf str;
+      default = {};
+      description = "Attribute set with additional configuration values";
+      example = literalExample ''
+        {
+          "ZSH_AUTOSUGGEST_BUFFER_MAX_SIZE" = "20";
+        }
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    programs.zsh.interactiveShellInit = ''
+      source ${pkgs.zsh-autosuggestions}/share/zsh-autosuggestions/zsh-autosuggestions.zsh
+
+      export ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE="${cfg.highlightStyle}"
+      export ZSH_AUTOSUGGEST_STRATEGY="${cfg.strategy}"
+
+      ${concatStringsSep "\n" (mapAttrsToList (key: value: ''export ${key}="${value}"'') cfg.extraConfig)}
+    '';
+
+  };
+}
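
A usage sketch combining the options defined above, with values taken from the option examples:

  programs.zsh.autosuggestions = {
    enable = true;
    highlightStyle = "fg=cyan";      # example value; the default is "fg=8"
    strategy = "match_prev_cmd";
    extraConfig = {
      "ZSH_AUTOSUGGEST_BUFFER_MAX_SIZE" = "20";
    };
  };
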
diff --git a/nixos/modules/programs/zsh/zsh.nix b/nixos/modules/programs/zsh/zsh.nix
index f689250dc61f..42d4e1d4ada0 100644
--- a/nixos/modules/programs/zsh/zsh.nix
+++ b/nixos/modules/programs/zsh/zsh.nix
@@ -69,7 +69,9 @@ in
 
       promptInit = mkOption {
         default = ''
-          autoload -U promptinit && promptinit && prompt walters
+          if [ "$TERM" != dumb ]; then
+            autoload -U promptinit && promptinit && prompt walters
+          fi
         '';
         description = ''
           Shell script code used to initialise the zsh prompt.
@@ -85,13 +87,6 @@ in
         type = types.bool;
       };
 
-      enableAutosuggestions = mkOption {
-        default = false;
-        description = ''
-          Enable zsh-autosuggestions
-        '';
-        type = types.bool;
-      };
     };
 
   };
@@ -166,10 +161,6 @@ in
 
         ${optionalString cfg.enableCompletion "autoload -U compinit && compinit"}
 
-        ${optionalString (cfg.enableAutosuggestions)
-          "source ${pkgs.zsh-autosuggestions}/share/zsh-autosuggestions/zsh-autosuggestions.zsh"
-        }
-
         ${cfge.interactiveShellInit}
 
         ${cfg.interactiveShellInit}
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index 8820a6da8c0b..7b094fc14203 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -31,6 +31,10 @@ with lib;
     (mkRenamedOptionModule [ "services" "graphite" "web" "host" ] [ "services" "graphite" "web" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "i2pd" "extIp" ] [ "services" "i2pd" "address" ])
     (mkRenamedOptionModule [ "services" "kibana" "host" ] [ "services" "kibana" "listenAddress" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"])
+    (mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "publicAddress" ] "")
+    (mkRenamedOptionModule [ "services" "kubernetes" "addons" "dashboard" "enableRBAC" ] [ "services" "kubernetes" "addons" "dashboard" "rbac" "enable" ])
     (mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "mpd" "network" "host" ] [ "services" "mpd" "network" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "listenAddress" ])
@@ -196,6 +200,12 @@ with lib;
     (mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "forceAutohint" ] [ "fonts" "fontconfig" "forceAutohint" ])
     (mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "renderMonoTTFAsBitmap" ] [ "fonts" "fontconfig" "renderMonoTTFAsBitmap" ])
 
+    # postgresqlBackup
+    (mkRemovedOptionModule [ "services" "postgresqlBackup" "period" ] ''
+       A systemd timer is now used instead of cron.
+       The starting time can be configured via <literal>services.postgresqlBackup.startAt</literal>.
+    '')
+
     # Profile splitting
     (mkRenamedOptionModule [ "virtualization" "growPartition" ] [ "boot" "growPartition" ])
 
@@ -246,6 +256,8 @@ with lib;
     (mkRenamedOptionModule [ "programs" "zsh" "oh-my-zsh" "custom" ] [ "programs" "zsh" "ohMyZsh" "custom" ])
     (mkRenamedOptionModule [ "programs" "zsh" "oh-my-zsh" "plugins" ] [ "programs" "zsh" "ohMyZsh" "plugins" ])
 
+    (mkRenamedOptionModule [ "programs" "zsh" "enableAutosuggestions" ] [ "programs" "zsh" "autosuggestions" "enable" ])
+
     # Xen
     (mkRenamedOptionModule [ "virtualisation" "xen" "qemu-package" ] [ "virtualisation" "xen" "package-qemu" ])
 
diff --git a/nixos/modules/services/admin/oxidized.nix b/nixos/modules/services/admin/oxidized.nix
new file mode 100644
index 000000000000..891ca6323c3c
--- /dev/null
+++ b/nixos/modules/services/admin/oxidized.nix
@@ -0,0 +1,116 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.oxidized;
+in
+{
+  options.services.oxidized = {
+    enable = mkEnableOption "the oxidized configuration backup service.";
+
+    user = mkOption {
+      type = types.str;
+      default = "oxidized";
+      description = ''
+        User under which the oxidized service runs.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "oxidized";
+      description = ''
+        Group under which the oxidized service runs.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/oxidized";
+      description = "State directory for the oxidized service.";
+    };
+
+    configFile = mkOption {
+      type = types.path;
+      example = literalExample ''
+        pkgs.writeText "oxidized-config.yml" '''
+          ---
+          debug: true
+          use_syslog: true
+          input:
+            default: ssh
+            ssh:
+              secure: true
+          interval: 3600
+          model_map:
+            dell: powerconnect
+            hp: procurve
+          source:
+            default: csv
+            csv:
+              delimiter: !ruby/regexp /:/
+              file: "/var/lib/oxidized/.config/oxidized/router.db"
+              map:
+                name: 0
+                model: 1
+                username: 2
+                password: 3
+          pid: "/var/lib/oxidized/.config/oxidized/pid"
+          rest: 127.0.0.1:8888
+          retries: 3
+          # ... additional config
+        ''';
+      '';
+      description = ''
+        Path to the oxidized configuration file.
+      '';
+    };
+
+    routerDB = mkOption {
+      type = types.path;
+      example = literalExample ''
+        pkgs.writeText "oxidized-router.db" '''
+          hostname-sw1:powerconnect:username1:password2
+          hostname-sw2:procurve:username2:password2
+          # ... additional hosts
+        '''
+      '';
+      description = ''
+        Path to the file/database which contains the targets for oxidized.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.extraGroups.${cfg.group} = { };
+    users.extraUsers.${cfg.user} = {
+      description = "Oxidized service user";
+      group = cfg.group;
+      home = cfg.dataDir;
+      createHome = true;
+    };
+
+    systemd.services.oxidized = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      preStart = ''
+        mkdir -p ${cfg.dataDir}/.config/oxidized
+        cp -v ${cfg.routerDB} ${cfg.dataDir}/.config/oxidized/router.db
+        cp -v ${cfg.configFile} ${cfg.dataDir}/.config/oxidized/config
+      '';
+
+      serviceConfig = {
+        ExecStart = "${pkgs.oxidized}/bin/oxidized";
+        User = cfg.user;
+        Group = cfg.group;
+        UMask = "0077";
+        NoNewPrivileges = true;
+        Restart  = "always";
+        WorkingDirectory = cfg.dataDir;
+        KillSignal = "SIGKILL";
+      };
+    };
+  };
+}
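
A usage sketch wiring up the two required paths; the file contents are abbreviated, see the literalExamples above for complete versions:

  services.oxidized = {
    enable = true;
    configFile = pkgs.writeText "oxidized-config.yml" ''
      ---
      interval: 3600
      # ... remaining settings as in the example above
    '';
    routerDB = pkgs.writeText "oxidized-router.db" ''
      hostname-sw1:powerconnect:username1:password1
    '';
  };
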
diff --git a/nixos/modules/services/audio/mpd.nix b/nixos/modules/services/audio/mpd.nix
index 5f379b392ea8..94020ed05d67 100644
--- a/nixos/modules/services/audio/mpd.nix
+++ b/nixos/modules/services/audio/mpd.nix
@@ -13,7 +13,9 @@ let
   mpdConf = pkgs.writeText "mpd.conf" ''
     music_directory     "${cfg.musicDirectory}"
     playlist_directory  "${cfg.playlistDirectory}"
-    db_file             "${cfg.dbFile}"
+    ${lib.optionalString (cfg.dbFile != null) ''
+      db_file             "${cfg.dbFile}"
+    ''}
     state_file          "${cfg.dataDir}/state"
     sticker_file        "${cfg.dataDir}/sticker.sql"
     log_file            "syslog"
@@ -126,11 +128,12 @@ in {
       };
 
       dbFile = mkOption {
-        type = types.str;
+        type = types.nullOr types.str;
         default = "${cfg.dataDir}/tag_cache";
         defaultText = ''''${dataDir}/tag_cache'';
         description = ''
-          The path to MPD's database.
+          The path to MPD's database. If set to <literal>null</literal> the
+          parameter is omitted from the configuration.
         '';
       };
     };
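
A one-line usage sketch of the new null case:

  services.mpd.dbFile = null;  # the db_file line is then left out of the generated mpd.conf
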
diff --git a/nixos/modules/services/audio/slimserver.nix b/nixos/modules/services/audio/slimserver.nix
index 7d661dd60408..640403d2c97d 100644
--- a/nixos/modules/services/audio/slimserver.nix
+++ b/nixos/modules/services/audio/slimserver.nix
@@ -51,7 +51,8 @@ in {
       serviceConfig = {
         User = "slimserver";
         PermissionsStartOnly = true;
-        ExecStart = "${cfg.package}/slimserver.pl --logdir ${cfg.dataDir}/logs --prefsdir ${cfg.dataDir}/prefs --cachedir ${cfg.dataDir}/cache";
+        # Issue 40589: Disable broken image/video support (audio still works!)
+        ExecStart = "${cfg.package}/slimserver.pl --logdir ${cfg.dataDir}/logs --prefsdir ${cfg.dataDir}/prefs --cachedir ${cfg.dataDir}/cache --noimage --novideo";
       };
     };
 
diff --git a/nixos/modules/services/backup/borgbackup.nix b/nixos/modules/services/backup/borgbackup.nix
index 1b730e0c2b76..0c3fc9af6f88 100644
--- a/nixos/modules/services/backup/borgbackup.nix
+++ b/nixos/modules/services/backup/borgbackup.nix
@@ -35,25 +35,26 @@ let
     ${cfg.preHook}
   '' + optionalString cfg.doInit ''
     # Run borg init if the repo doesn't exist yet
-    if ! borg list > /dev/null; then
-      borg init \
+    if ! borg list $extraArgs > /dev/null; then
+      borg init $extraArgs \
         --encryption ${cfg.encryption.mode} \
         $extraInitArgs
       ${cfg.postInit}
     fi
   '' + ''
-    borg create \
+    borg create $extraArgs \
       --compression ${cfg.compression} \
       --exclude-from ${mkExcludeFile cfg} \
       $extraCreateArgs \
       "::$archiveName$archiveSuffix" \
       ${escapeShellArgs cfg.paths}
   '' + optionalString cfg.appendFailedSuffix ''
-    borg rename "::$archiveName$archiveSuffix" "$archiveName"
+    borg rename $extraArgs \
+      "::$archiveName$archiveSuffix" "$archiveName"
   '' + ''
     ${cfg.postCreate}
   '' + optionalString (cfg.prune.keep != { }) ''
-    borg prune \
+    borg prune $extraArgs \
       ${mkKeepArgs cfg} \
       --prefix ${escapeShellArg cfg.prune.prefix} \
       $extraPruneArgs
@@ -85,13 +86,14 @@ let
         ProtectSystem = "strict";
         ReadWritePaths =
           [ "${userHome}/.config/borg" "${userHome}/.cache/borg" ]
+          ++ cfg.readWritePaths
           # Borg needs write access to repo if it is not remote
           ++ optional (isLocalPath cfg.repo) cfg.repo;
-        PrivateTmp = true;
+        PrivateTmp = cfg.privateTmp;
       };
       environment = {
         BORG_REPO = cfg.repo;
-        inherit (cfg) extraInitArgs extraCreateArgs extraPruneArgs;
+        inherit (cfg) extraArgs extraInitArgs extraCreateArgs extraPruneArgs;
       } // (mkPassEnv cfg) // cfg.environment;
       inherit (cfg) startAt;
     };
@@ -318,6 +320,30 @@ in {
             ];
           };
 
+          readWritePaths = mkOption {
+            type = with types; listOf path;
+            description = ''
+              By default, borg cannot write anywhere on the system but
+              <literal>$HOME/.config/borg</literal> and <literal>$HOME/.cache/borg</literal>.
+              If, for example, your preHook script needs to dump files
+              somewhere, put those directories here.
+            '';
+            default = [ ];
+            example = [
+              "/var/backup/mysqldump"
+            ];
+          };
+
+          privateTmp = mkOption {
+            type = types.bool;
+            description = ''
+              Set the <literal>PrivateTmp</literal> option for
+              the systemd-service. Set to false if you need sockets
+              or other files from global /tmp.
+            '';
+            default = true;
+          };
+
           doInit = mkOption {
             type = types.bool;
             description = ''
@@ -430,6 +456,16 @@ in {
             default = "";
           };
 
+          extraArgs = mkOption {
+            type = types.str;
+            description = ''
+              Additional arguments for all <command>borg</command> calls the
+              service has. Handle with care.
+            '';
+            default = "";
+            example = "--remote-path=/path/to/borg";
+          };
+
           extraInitArgs = mkOption {
             type = types.str;
             description = ''
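
A usage sketch of the new job options added here; the job name, repository, and paths are hypothetical:

  services.borgbackup.jobs.home = {
    paths = [ "/home" "/var/backup/mysqldump" ];
    repo = "borg@backup.example.org:repo";        # hypothetical remote repository
    encryption.mode = "none";
    extraArgs = "--remote-path=/path/to/borg";    # passed to every borg invocation
    readWritePaths = [ "/var/backup/mysqldump" ]; # e.g. where a preHook dumps databases
    privateTmp = true;                            # set to false if sockets in /tmp are needed
  };
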
diff --git a/nixos/modules/services/backup/duplicati.nix b/nixos/modules/services/backup/duplicati.nix
index 9772ca4d20a7..379fde1fe038 100644
--- a/nixos/modules/services/backup/duplicati.nix
+++ b/nixos/modules/services/backup/duplicati.nix
@@ -9,6 +9,23 @@ in
   options = {
     services.duplicati = {
       enable = mkEnableOption "Duplicati";
+
+      port = mkOption {
+        default = 8200;
+        type = types.int;
+        description = ''
+          Port serving the web interface
+        '';
+      };
+
+      interface = mkOption {
+        default = "lo";
+        type = types.str;
+        description = ''
+          Listening interface for the web UI
+          Set it to "any" to listen on all available interfaces
+        '';
+      };
     };
   };
 
@@ -22,7 +39,7 @@ in
       serviceConfig = {
         User = "duplicati";
         Group = "duplicati";
-        ExecStart = "${pkgs.duplicati}/bin/duplicati-server --webservice-interface=any --webservice-port=8200 --server-datafolder=/var/lib/duplicati";
+        ExecStart = "${pkgs.duplicati}/bin/duplicati-server --webservice-interface=${cfg.interface} --webservice-port=${toString cfg.port} --server-datafolder=/var/lib/duplicati";
         Restart = "on-failure";
       };
     };
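
A usage sketch of the two new options; the values shown are the port default and the non-default "any" interface:

  services.duplicati = {
    enable = true;
    port = 8200;        # web UI port (the default)
    interface = "any";  # listen on all interfaces instead of the default "lo"
  };
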
diff --git a/nixos/modules/services/backup/postgresql-backup.nix b/nixos/modules/services/backup/postgresql-backup.nix
index 4a5ebebc682e..2ec78ce6f2cf 100644
--- a/nixos/modules/services/backup/postgresql-backup.nix
+++ b/nixos/modules/services/backup/postgresql-backup.nix
@@ -3,18 +3,41 @@
 with lib;
 
 let
-  inherit (pkgs) gzip;
 
-  location = config.services.postgresqlBackup.location;
+  cfg = config.services.postgresqlBackup;
 
-  postgresqlBackupCron = db:
-    ''
-      ${config.services.postgresqlBackup.period} root ${config.services.postgresql.package}/bin/pg_dump ${db} | ${gzip}/bin/gzip -c > ${location}/${db}.gz
-    '';
+  postgresqlBackupService = db :
+    {
+      enable = true;
 
-in
+      description = "Backup of database ${db}";
 
-{
+      requires = [ "postgresql.service" ];
+
+      preStart = ''
+        mkdir -m 0700 -p ${cfg.location}
+        chown postgres ${cfg.location}
+      '';
+
+      script = ''
+        if [ -e ${cfg.location}/${db}.sql.gz ]; then
+          ${pkgs.coreutils}/bin/mv ${cfg.location}/${db}.sql.gz ${cfg.location}/${db}.prev.sql.gz
+        fi
+
+        ${config.services.postgresql.package}/bin/pg_dump ${cfg.pgdumpOptions} ${db} | \
+          ${pkgs.gzip}/bin/gzip -c > ${cfg.location}/${db}.sql.gz
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        PermissionsStartOnly = "true";
+        User = "postgres";
+      };
+
+      startAt = cfg.startAt;
+    };
+
+in {
 
   options = {
 
@@ -27,10 +50,10 @@ in
         '';
       };
 
-      period = mkOption {
-        default = "15 01 * * *";
+      startAt = mkOption {
+        default = "*-*-* 01:15:00";
         description = ''
-          This option defines (in the format used by cron) when the
+          This option defines (see <literal>systemd.time</literal> for format) when the
           databases should be dumped.
           The default is to update at 01:15 (at night) every day.
         '';
@@ -49,18 +72,23 @@ in
           Location to put the gzipped PostgreSQL database dumps.
         '';
       };
+
+      pgdumpOptions = mkOption {
+        type = types.string;
+        default = "-Cbo";
+        description = ''
+          Command line options for pg_dump.
+        '';
+      };
     };
 
   };
 
   config = mkIf config.services.postgresqlBackup.enable {
-    services.cron.systemCronJobs = map postgresqlBackupCron config.services.postgresqlBackup.databases;
 
-    system.activationScripts.postgresqlBackup = stringAfter [ "stdio" "users" ]
-      ''
-        mkdir -m 0700 -p ${config.services.postgresqlBackup.location}
-        chown root ${config.services.postgresqlBackup.location}
-      '';
+    systemd.services = listToAttrs (map (db : {
+          name = "postgresqlBackup-${db}";
+          value = postgresqlBackupService db; } ) cfg.databases);
   };
 
 }
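
A usage sketch of the module after the cron-to-timer migration; the database name is hypothetical:

  services.postgresqlBackup = {
    enable = true;
    databases = [ "mydb" ];       # hypothetical database name
    startAt = "*-*-* 01:15:00";   # systemd.time(7) calendar expression (the new default)
    pgdumpOptions = "-Cbo";
  };
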
diff --git a/nixos/modules/services/cluster/kubernetes/dashboard.nix b/nixos/modules/services/cluster/kubernetes/dashboard.nix
index e331889b9dd5..6d9faada4401 100644
--- a/nixos/modules/services/cluster/kubernetes/dashboard.nix
+++ b/nixos/modules/services/cluster/kubernetes/dashboard.nix
@@ -4,38 +4,61 @@ with lib;
 
 let
   cfg = config.services.kubernetes.addons.dashboard;
-
-  name = "gcr.io/google_containers/kubernetes-dashboard-amd64";
-	version = "v1.8.2";
-
-  image = pkgs.dockerTools.pullImage {
-    imageName = name;
-    imageTag = version;
-    sha256 = "11h0fz3wxp0f10fsyqaxjm7l2qg7xws50dv5iwlck5gb1fjmajad";
-  };
 in {
   options.services.kubernetes.addons.dashboard = {
     enable = mkEnableOption "kubernetes dashboard addon";
 
-    enableRBAC = mkOption {
-      description = "Whether to enable role based access control is enabled for kubernetes dashboard";
-      type = types.bool;
-      default = elem "RBAC" config.services.kubernetes.apiserver.authorizationMode;
+    rbac = mkOption {
+      description = "Role-based access control (RBAC) options";
+      type = types.submodule {
+
+        options = {
+          enable = mkOption {
+            description = "Whether to enable role based access control is enabled for kubernetes dashboard";
+            type = types.bool;
+            default = elem "RBAC" config.services.kubernetes.apiserver.authorizationMode;
+          };
+
+          clusterAdmin = mkOption {
+            description = "Whether to assign cluster admin rights to the kubernetes dashboard";
+            type = types.bool;
+            default = false;
+          };
+          
+        };
+      };
+    };
+
+    version = mkOption {
+      description = "Which version of the kubernetes dashboard to deploy";
+      type = types.str;
+      default = "v1.8.3";
+    };
+
+    image = mkOption {
+      description = "Docker image to seed for the kubernetes dashboard container.";
+      type = types.attrs;
+      default = {
+        imageName = "k8s.gcr.io/kubernetes-dashboard-amd64";
+        imageDigest = "sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0";
+        finalImageTag = cfg.version;
+        sha256 = "18ajcg0q1vignfjk2sm4xj4wzphfz8wah69ps8dklqfvv0164mc8";
+      };
     };
   };
 
   config = mkIf cfg.enable {
-    services.kubernetes.kubelet.seedDockerImages = [image];
+    services.kubernetes.kubelet.seedDockerImages = [(pkgs.dockerTools.pullImage cfg.image)];
 
     services.kubernetes.addonManager.addons = {
       kubernetes-dashboard-deployment = {
         kind = "Deployment";
-        apiVersion = "apps/v1beta1";
+        apiVersion = "apps/v1";
         metadata = {
           labels = {
             k8s-addon = "kubernetes-dashboard.addons.k8s.io";
             k8s-app = "kubernetes-dashboard";
-            version = version;
+            version = cfg.version;
             "kubernetes.io/cluster-service" = "true";
             "addonmanager.kubernetes.io/mode" = "Reconcile";
           };
@@ -51,45 +74,66 @@ in {
               labels = {
                 k8s-addon = "kubernetes-dashboard.addons.k8s.io";
                 k8s-app = "kubernetes-dashboard";
-                version = version;
+                version = cfg.version;
                 "kubernetes.io/cluster-service" = "true";
               };
               annotations = {
                 "scheduler.alpha.kubernetes.io/critical-pod" = "";
-                #"scheduler.alpha.kubernetes.io/tolerations" = ''[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'';
               };
             };
             spec = {
+              priorityClassName = "system-cluster-critical";
               containers = [{
                 name = "kubernetes-dashboard";
-                image = "${name}:${version}";
+                image = with cfg.image; "${imageName}:${finalImageTag}";
                 ports = [{
-                  containerPort = 9090;
+                  containerPort = 8443;
                   protocol = "TCP";
                 }];
                 resources = {
                   limits = {
                     cpu = "100m";
-                    memory = "50Mi";
+                    memory = "300Mi";
                   };
                   requests = {
                     cpu = "100m";
-                    memory = "50Mi";
+                    memory = "100Mi";
                   };
                 };
+                args = ["--auto-generate-certificates"];
+                volumeMounts = [{
+                  name = "tmp-volume";
+                  mountPath = "/tmp";
+                } {
+                  name = "kubernetes-dashboard-certs";
+                  mountPath = "/certs";
+                }];
                 livenessProbe = {
                   httpGet = {
+                    scheme = "HTTPS";
                     path = "/";
-                    port = 9090;
+                    port = 8443;
                   };
                   initialDelaySeconds = 30;
                   timeoutSeconds = 30;
                 };
               }];
+              volumes = [{
+                name = "kubernetes-dashboard-certs";
+                secret = {
+                  secretName = "kubernetes-dashboard-certs";
+                };
+              } {
+                name = "tmp-volume";
+                emptyDir = {};
+              }];
               serviceAccountName = "kubernetes-dashboard";
               tolerations = [{
                 key = "node-role.kubernetes.io/master";
                 effect = "NoSchedule";
+              } {
+                key = "CriticalAddonsOnly";
+                operator = "Exists";
               }];
             };
           };
@@ -112,8 +156,8 @@ in {
         };
         spec = {
           ports = [{
-            port = 80;
-            targetPort = 9090;
+            port = 443;
+            targetPort = 8443;
           }];
           selector.k8s-app = "kubernetes-dashboard";
         };
@@ -126,35 +170,153 @@ in {
           labels = {
             k8s-app = "kubernetes-dashboard";
             k8s-addon = "kubernetes-dashboard.addons.k8s.io";
-						"addonmanager.kubernetes.io/mode" = "Reconcile";
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
           };
           name = "kubernetes-dashboard";
           namespace = "kube-system";
         };
       };
-    } // (optionalAttrs cfg.enableRBAC {
-      kubernetes-dashboard-crb = {
-        apiVersion = "rbac.authorization.k8s.io/v1beta1";
-        kind = "ClusterRoleBinding";
+      kubernetes-dashboard-sec-certs = {
+        apiVersion = "v1";
+        kind = "Secret";
         metadata = {
-          name = "kubernetes-dashboard";
           labels = {
             k8s-app = "kubernetes-dashboard";
-            k8s-addon = "kubernetes-dashboard.addons.k8s.io";
-            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
           };
+          name = "kubernetes-dashboard-certs";
+          namespace = "kube-system";
         };
-        roleRef = {
-          apiGroup = "rbac.authorization.k8s.io";
-          kind = "ClusterRole";
-          name = "cluster-admin";
+        type = "Opaque";
+      };
+      kubernetes-dashboard-sec-kholder = {
+        apiVersion = "v1";
+        kind = "Secret";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
+          };
+          name = "kubernetes-dashboard-key-holder";
+          namespace = "kube-system";
         };
+        type = "Opaque";
+      };
+      kubernetes-dashboard-cm = {
+        apiVersion = "v1";
+        kind = "ConfigMap";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
+          };
+          name = "kubernetes-dashboard-settings";
+          namespace = "kube-system";
+        };
+      };
+    } // (optionalAttrs cfg.rbac.enable
+      (let
         subjects = [{
           kind = "ServiceAccount";
           name = "kubernetes-dashboard";
           namespace = "kube-system";
         }];
-      };
-    });
+        labels = {
+          k8s-app = "kubernetes-dashboard";
+          k8s-addon = "kubernetes-dashboard.addons.k8s.io";
+          "addonmanager.kubernetes.io/mode" = "Reconcile";
+        };
+      in
+        (if cfg.rbac.clusterAdmin then {
+          kubernetes-dashboard-crb = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "ClusterRoleBinding";
+            metadata = {
+              name = "kubernetes-dashboard";
+              inherit labels;
+            };
+            roleRef = {
+              apiGroup = "rbac.authorization.k8s.io";
+              kind = "ClusterRole";
+              name = "cluster-admin";
+            };
+            inherit subjects;
+          };
+        }
+        else
+        {
+          # Upstream role- and rolebinding as per:
+          # https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/alternative/kubernetes-dashboard.yaml
+          kubernetes-dashboard-role = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "Role";
+            metadata = {
+              name = "kubernetes-dashboard-minimal";
+              namespace = "kube-system";
+              inherit labels;
+            };
+            rules = [
+              # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
+              {
+                apiGroups = [""];
+                resources = ["secrets"];
+                verbs = ["create"];
+              }
+              # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
+              {
+                apiGroups = [""];
+                resources = ["configmaps"];
+                verbs = ["create"];
+              }
+              # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
+              {
+                apiGroups = [""];
+                resources = ["secrets"];
+                resourceNames = ["kubernetes-dashboard-key-holder"];
+                verbs = ["get" "update" "delete"];
+              }
+              # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
+              {
+                apiGroups = [""];
+                resources = ["configmaps"];
+                resourceNames = ["kubernetes-dashboard-settings"];
+                verbs = ["get" "update"];
+              }
+              # Allow Dashboard to get metrics from heapster.
+              {
+                apiGroups = [""];
+                resources = ["services"];
+                resourceNames = ["heapster"];
+                verbs = ["proxy"];
+              }
+              {
+                apiGroups = [""];
+                resources = ["services/proxy"];
+                resourceNames = ["heapster" "http:heapster:" "https:heapster:"];
+                verbs = ["get"];
+              }
+            ];
+          };
+
+          kubernetes-dashboard-rb = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "RoleBinding";
+            metadata = {
+              name = "kubernetes-dashboard-minimal";
+              namespace = "kube-system";
+              inherit labels;
+            };
+            roleRef = {
+              apiGroup = "rbac.authorization.k8s.io";
+              kind = "Role";
+              name = "kubernetes-dashboard-minimal";
+            };
+            inherit subjects;
+          };
+        })
+    ));
   };
 }
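
For illustration, a minimal usage sketch of the reworked dashboard RBAC handling; the option path services.kubernetes.addons.dashboard and the rbac attribute names follow this module, while the values themselves are placeholders:

  # Hedged sketch: run the dashboard with RBAC enabled, bound to the minimal
  # Role/RoleBinding generated above rather than to cluster-admin.
  services.kubernetes.addons.dashboard = {
    enable = true;
    rbac = {
      enable = true;
      clusterAdmin = false;
    };
  };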
diff --git a/nixos/modules/services/cluster/kubernetes/default.nix b/nixos/modules/services/cluster/kubernetes/default.nix
index aeb0a0d2432d..5e87ae88f5a8 100644
--- a/nixos/modules/services/cluster/kubernetes/default.nix
+++ b/nixos/modules/services/cluster/kubernetes/default.nix
@@ -5,6 +5,37 @@ with lib;
 let
   cfg = config.services.kubernetes;
 
+  # YAML config; see:
+  #   https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/
+  #   https://github.com/kubernetes/kubernetes/blob/release-1.10/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go
+  #
+  # TODO: migrate the following flags to this config file
+  #
+  #   --pod-manifest-path
+  #   --address
+  #   --port
+  #   --tls-cert-file
+  #   --tls-private-key-file
+  #   --client-ca-file
+  #   --authentication-token-webhook
+  #   --authentication-token-webhook-cache-ttl
+  #   --authorization-mode
+  #   --healthz-bind-address
+  #   --healthz-port
+  #   --allow-privileged
+  #   --cluster-dns
+  #   --cluster-domain
+  #   --hairpin-mode
+  #   --feature-gates
+  kubeletConfig = pkgs.runCommand "kubelet-config.yaml" { } ''
+    echo > $out ${pkgs.lib.escapeShellArg (builtins.toJSON {
+      kind = "KubeletConfiguration";
+      apiVersion = "kubelet.config.k8s.io/v1beta1";
+      ${if cfg.kubelet.applyManifests then "staticPodPath" else null} =
+        manifests;
+    })}
+  '';
+
   skipAttrs = attrs: map (filterAttrs (k: v: k != "enable"))
     (filter (v: !(hasAttr "enable" v) || v.enable) attrs);
 
@@ -42,12 +73,14 @@ let
   mkKubeConfigOptions = prefix: {
     server = mkOption {
       description = "${prefix} kube-apiserver server address.";
-      default = "http://${cfg.apiserver.address}:${toString cfg.apiserver.port}";
+      default = "http://${if cfg.apiserver.advertiseAddress != null
+                          then cfg.apiserver.advertiseAddress
+                          else "127.0.0.1"}:${toString cfg.apiserver.port}";
       type = types.str;
     };
 
     caFile = mkOption {
-      description = "${prefix} certificate authrority file used to connect to kube-apiserver.";
+      description = "${prefix} certificate authority file used to connect to kube-apiserver.";
       type = types.nullOr types.path;
       default = cfg.caFile;
     };
@@ -72,12 +105,18 @@ let
     keyFile = mkDefault cfg.kubeconfig.keyFile;
   };
 
-  cniConfig = pkgs.buildEnv {
-    name = "kubernetes-cni-config";
-    paths = imap (i: entry:
-      pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
-    ) cfg.kubelet.cni.config;
-  };
+  cniConfig =
+    if cfg.kubelet.cni.config != [] && !(isNull cfg.kubelet.cni.configDir) then
+      throw "Verbatim CNI-config and CNI configDir cannot both be set."
+    else if !(isNull cfg.kubelet.cni.configDir) then
+      cfg.kubelet.cni.configDir
+    else
+      (pkgs.buildEnv {
+        name = "kubernetes-cni-config";
+        paths = imap (i: entry:
+          pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
+        ) cfg.kubelet.cni.config;
+      });
 
   manifests = pkgs.buildEnv {
     name = "kubernetes-manifests";
@@ -213,18 +252,13 @@ in {
         type = types.listOf types.str;
       };
 
-      address = mkOption {
-        description = "Kubernetes apiserver listening address.";
-        default = "127.0.0.1";
-        type = types.str;
-      };
-
-      publicAddress = mkOption {
+      bindAddress = mkOption {
         description = ''
-          Kubernetes apiserver public listening address used for read only and
-          secure port.
+          The IP address on which to listen for the --secure-port port.
+          The associated interface(s) must be reachable by the rest
+          of the cluster, and by CLI/web clients.
         '';
-        default = cfg.apiserver.address;
+        default = "0.0.0.0";
         type = types.str;
       };
 
@@ -279,7 +313,7 @@ in {
       tokenAuthFile = mkOption {
         description = ''
           Kubernetes apiserver token authentication file. See
-          <link xlink:href="https://kubernetes.io/docs/admin/authentication.html"/>
+          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
         '';
         default = null;
         type = types.nullOr types.path;
@@ -288,7 +322,7 @@ in {
       basicAuthFile = mkOption {
         description = ''
           Kubernetes apiserver basic authentication file. See
-          <link xlink:href="https://kubernetes.io/docs/admin/authentication.html"/>
+          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
         '';
         default = pkgs.writeText "users" ''
           kubernetes,admin,0
@@ -298,22 +332,31 @@ in {
 
       authorizationMode = mkOption {
         description = ''
-          Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/RBAC). See
-          <link xlink:href="https://kubernetes.io/docs/admin/authorization.html"/>
+          Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
+          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
         '';
         default = ["RBAC" "Node"];
-        type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "RBAC" "Node"]);
+        type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
       };
 
       authorizationPolicy = mkOption {
         description = ''
           Kubernetes apiserver authorization policy file. See
-          <link xlink:href="https://kubernetes.io/docs/admin/authorization.html"/>
+          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
         '';
         default = [];
         type = types.listOf types.attrs;
       };
 
+      webhookConfig = mkOption {
+        description = ''
+          Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
+          See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
+        '';
+        default = null;
+        type = types.nullOr types.path;
+      };
+
       allowPrivileged = mkOption {
         description = "Whether to allow privileged containers on Kubernetes.";
         default = true;
@@ -332,16 +375,16 @@ in {
       runtimeConfig = mkOption {
         description = ''
           Api runtime configuration. See
-          <link xlink:href="https://kubernetes.io/docs/admin/cluster-management.html"/>
+          <link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
         '';
         default = "authentication.k8s.io/v1beta1=true";
         example = "api/all=false,api/v1=true";
         type = types.str;
       };
 
-      admissionControl = mkOption {
+      enableAdmissionPlugins = mkOption {
         description = ''
-          Kubernetes admission control plugins to use. See
+          Kubernetes admission control plugins to enable. See
           <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
         '';
         default = ["NamespaceLifecycle" "LimitRanger" "ServiceAccount" "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds" "NodeRestriction"];
@@ -353,6 +396,15 @@ in {
         type = types.listOf types.str;
       };
 
+      disableAdmissionPlugins = mkOption {
+        description = ''
+          Kubernetes admission control plugins to disable. See
+          <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
+        '';
+        default = [];
+        type = types.listOf types.str;
+      };
+
       serviceAccountKeyFile = mkOption {
         description = ''
           Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
@@ -573,6 +625,7 @@ in {
         type = types.bool;
       };
 
+      # TODO: remove this deprecated flag
       cadvisorPort = mkOption {
         description = "Kubernetes kubelet local cadvisor port.";
         default = 4194;
@@ -629,6 +682,12 @@ in {
             }]
           '';
         };
+
+        configDir = mkOption {
+          description = "Path to Kubernetes CNI configuration directory.";
+          type = types.nullOr types.path;
+          default = null;
+        };
       };
 
       manifests = mkOption {
@@ -783,12 +842,10 @@ in {
         serviceConfig = {
           Slice = "kubernetes.slice";
           ExecStart = ''${cfg.package}/bin/kubelet \
-            ${optionalString cfg.kubelet.applyManifests
-              "--pod-manifest-path=${manifests}"} \
             ${optionalString (taints != "")
               "--register-with-taints=${taints}"} \
             --kubeconfig=${mkKubeConfig "kubelet" cfg.kubelet.kubeconfig} \
-            --require-kubeconfig \
+            --config=${kubeletConfig} \
             --address=${cfg.kubelet.address} \
             --port=${toString cfg.kubelet.port} \
             --register-node=${boolToString cfg.kubelet.registerNode} \
@@ -853,7 +910,7 @@ in {
 
     (mkIf cfg.apiserver.enable {
       systemd.services.kube-apiserver = {
-        description = "Kubernetes Kubelet Service";
+        description = "Kubernetes APIServer Service";
         wantedBy = [ "kubernetes.target" ];
         after = [ "network.target" "docker.service" ];
         serviceConfig = {
@@ -867,7 +924,7 @@ in {
             ${optionalString (cfg.etcd.keyFile != null)
               "--etcd-keyfile=${cfg.etcd.keyFile}"} \
             --insecure-port=${toString cfg.apiserver.port} \
-            --bind-address=0.0.0.0 \
+            --bind-address=${cfg.apiserver.bindAddress} \
             ${optionalString (cfg.apiserver.advertiseAddress != null)
               "--advertise-address=${cfg.apiserver.advertiseAddress}"} \
             --allow-privileged=${boolToString cfg.apiserver.allowPrivileged}\
@@ -895,11 +952,15 @@ in {
                 (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.apiserver.authorizationPolicy)
               }"
             } \
+            ${optionalString (elem "Webhook" cfg.apiserver.authorizationMode)
+              "--authorization-webhook-config-file=${cfg.apiserver.webhookConfig}"
+            } \
             --secure-port=${toString cfg.apiserver.securePort} \
             --service-cluster-ip-range=${cfg.apiserver.serviceClusterIpRange} \
             ${optionalString (cfg.apiserver.runtimeConfig != "")
               "--runtime-config=${cfg.apiserver.runtimeConfig}"} \
-            --admission_control=${concatStringsSep "," cfg.apiserver.admissionControl} \
+            --enable-admission-plugins=${concatStringsSep "," cfg.apiserver.enableAdmissionPlugins} \
+            --disable-admission-plugins=${concatStringsSep "," cfg.apiserver.disableAdmissionPlugins} \
             ${optionalString (cfg.apiserver.serviceAccountKeyFile!=null)
               "--service-account-key-file=${cfg.apiserver.serviceAccountKeyFile}"} \
             ${optionalString cfg.verbose "--v=6"} \
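
As a quick orientation, a hedged configuration sketch exercising the options added or renamed in this file (bindAddress, enableAdmissionPlugins, disableAdmissionPlugins, webhookConfig); all values are placeholders and the webhook kubeconfig path is hypothetical:

  services.kubernetes.apiserver = {
    enable = true;
    # Listen address for --secure-port; 0.0.0.0 is the new default.
    bindAddress = "0.0.0.0";
    # Replaces the removed admissionControl option.
    enableAdmissionPlugins = [ "NamespaceLifecycle" "LimitRanger" "ServiceAccount" ];
    disableAdmissionPlugins = [ "PersistentVolumeLabel" ];
    # Listing "Webhook" here requires webhookConfig to be set.
    authorizationMode = [ "RBAC" "Node" "Webhook" ];
    webhookConfig = "/var/lib/kubernetes/authz-webhook.kubeconfig";
  };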
diff --git a/nixos/modules/services/cluster/kubernetes/dns.nix b/nixos/modules/services/cluster/kubernetes/dns.nix
index 226fdadffd1a..43bbb50a48d4 100644
--- a/nixos/modules/services/cluster/kubernetes/dns.nix
+++ b/nixos/modules/services/cluster/kubernetes/dns.nix
@@ -3,26 +3,7 @@
 with lib;
 
 let
-  version = "1.14.4";
-
-  k8s-dns-kube-dns = pkgs.dockerTools.pullImage {
-    imageName = "gcr.io/google_containers/k8s-dns-kube-dns-amd64";
-    imageTag = version;
-    sha256 = "0q97xfqrigrfjl2a9cxl5in619py0zv44gch09jm8gqjkxl80imp";
-  };
-
-  k8s-dns-dnsmasq-nanny = pkgs.dockerTools.pullImage {
-    imageName = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64";
-    imageTag = version;
-    sha256 = "051w5ca4qb88mwva4hbnh9xzlsvv7k1mbk3wz50lmig2mqrqqx6c";
-  };
-
-  k8s-dns-sidecar = pkgs.dockerTools.pullImage {
-    imageName = "gcr.io/google_containers/k8s-dns-sidecar-amd64";
-    imageTag = version;
-    sha256 = "1z0d129bcm8i2cqq36x5jhnrv9hirj8c6kjrmdav8vgf7py78vsm";
-  };
-
+  version = "1.14.10";
   cfg = config.services.kubernetes.addons.dns;
 in {
   options.services.kubernetes.addons.dns = {
@@ -45,18 +26,51 @@ in {
       default = "cluster.local";
       type = types.str;
     };
+
+    kube-dns = mkOption {
+      description = "Docker image to seed for the kube-dns main container.";
+      type = types.attrs;
+      default = {
+        imageName = "k8s.gcr.io/k8s-dns-kube-dns-amd64";
+        imageDigest = "sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8";
+        finalImageTag = version;
+        sha256 = "0x583znk9smqn0fix7ld8sm5jgaxhqhx3fq97b1wkqm7iwhvl3pj";
+      };
+    };
+
+    dnsmasq-nanny = mkOption {
+      description = "Docker image to seed for the kube-dns dnsmasq container.";
+      type = types.attrs;
+      default = {
+        imageName = "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64";
+        imageDigest = "sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8";
+        finalImageTag = version;
+        sha256 = "1fihml7s2mfwgac51cbqpylkwbivc8nyhgi4vb820s83zvl8a6y1";
+      };
+    };
+
+    sidecar = mkOption {
+      description = "Docker image to seed for the kube-dns sidecar container.";
+      type = types.attrs;
+      default = {
+        imageName = "k8s.gcr.io/k8s-dns-sidecar-amd64";
+        imageDigest = "sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4";
+        finalImageTag = version;
+        sha256 = "08l1bv5jgrhvjzpqpbinrkgvv52snc4fzyd8ya9v18ns2klyz7m0";
+      };
+    };
   };
 
   config = mkIf cfg.enable {
-    services.kubernetes.kubelet.seedDockerImages = [
-      k8s-dns-kube-dns
-      k8s-dns-dnsmasq-nanny
-      k8s-dns-sidecar
+    services.kubernetes.kubelet.seedDockerImages = with pkgs.dockerTools; [
+      (pullImage cfg.kube-dns)
+      (pullImage cfg.dnsmasq-nanny)
+      (pullImage cfg.sidecar)
     ];
 
     services.kubernetes.addonManager.addons = {
       kubedns-deployment = {
-        apiVersion = "apps/v1beta1";
+        apiVersion = "extensions/v1beta1";
         kind = "Deployment";
         metadata = {
           labels = {
@@ -81,9 +95,38 @@ in {
               labels.k8s-app = "kube-dns";
             };
             spec = {
+              priorityClassName = "system-cluster-critical";
               containers = [
                 {
                   name = "kubedns";
+                  image = with cfg.kube-dns; "${imageName}:${finalImageTag}";
+                  resources = {
+                    limits.memory = "170Mi";
+                    requests = {
+                      cpu = "100m";
+                      memory = "70Mi";
+                    };
+                  };
+                  livenessProbe = {
+                    failureThreshold = 5;
+                    httpGet = {
+                      path = "/healthcheck/kubedns";
+                      port = 10054;
+                      scheme = "HTTP";
+                    };
+                    initialDelaySeconds = 60;
+                    successThreshold = 1;
+                    timeoutSeconds = 5;
+                  };
+                  readinessProbe = {
+                    httpGet = {
+                      path = "/readiness";
+                      port = 8081;
+                      scheme = "HTTP";
+                    };
+                    initialDelaySeconds = 3;
+                    timeoutSeconds = 5;
+                  };
                   args = [
                     "--domain=${cfg.clusterDomain}"
                     "--dns-port=10053"
@@ -96,18 +139,6 @@ in {
                       value = "10055";
                     }
                   ];
-                  image = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:${version}";
-                  livenessProbe = {
-                    failureThreshold = 5;
-                    httpGet = {
-                      path = "/healthcheck/kubedns";
-                      port = 10054;
-                      scheme = "HTTP";
-                    };
-                    initialDelaySeconds = 60;
-                    successThreshold = 1;
-                    timeoutSeconds = 5;
-                  };
                   ports = [
                     {
                       containerPort = 10053;
@@ -125,22 +156,6 @@ in {
                       protocol = "TCP";
                     }
                   ];
-                  readinessProbe = {
-                    httpGet = {
-                      path = "/readiness";
-                      port = 8081;
-                      scheme = "HTTP";
-                    };
-                    initialDelaySeconds = 3;
-                    timeoutSeconds = 5;
-                  };
-                  resources = {
-                    limits.memory = "170Mi";
-                    requests = {
-                      cpu = "100m";
-                      memory = "70Mi";
-                    };
-                  };
                   volumeMounts = [
                     {
                       mountPath = "/kube-dns-config";
@@ -149,6 +164,19 @@ in {
                   ];
                 }
                 {
+                  name = "dnsmasq";
+                  image = with cfg.dnsmasq-nanny; "${imageName}:${finalImageTag}";
+                  livenessProbe = {
+                    httpGet = {
+                      path = "/healthcheck/dnsmasq";
+                      port = 10054;
+                      scheme = "HTTP";
+                    };
+                    initialDelaySeconds = 60;
+                    timeoutSeconds = 5;
+                    successThreshold = 1;
+                    failureThreshold = 5;
+                  };
                   args = [
                     "-v=2"
                     "-logtostderr"
@@ -162,19 +190,6 @@ in {
                     "--server=/in-addr.arpa/127.0.0.1#10053"
                     "--server=/ip6.arpa/127.0.0.1#10053"
                   ];
-                  image = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:${version}";
-                  livenessProbe = {
-                    failureThreshold = 5;
-                    httpGet = {
-                      path = "/healthcheck/dnsmasq";
-                      port = 10054;
-                      scheme = "HTTP";
-                    };
-                    initialDelaySeconds = 60;
-                    successThreshold = 1;
-                    timeoutSeconds = 5;
-                  };
-                  name = "dnsmasq";
                   ports = [
                     {
                       containerPort = 53;
@@ -202,24 +217,24 @@ in {
                 }
                 {
                   name = "sidecar";
-                  image = "gcr.io/google_containers/k8s-dns-sidecar-amd64:${version}";
-                  args = [
-                    "--v=2"
-                    "--logtostderr"
-                    "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
-                    "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
-                  ];
+                  image = with cfg.sidecar; "${imageName}:${finalImageTag}";
                   livenessProbe = {
-                    failureThreshold = 5;
                     httpGet = {
                       path = "/metrics";
                       port = 10054;
                       scheme = "HTTP";
                     };
                     initialDelaySeconds = 60;
-                    successThreshold = 1;
                     timeoutSeconds = 5;
+                    successThreshold = 1;
+                    failureThreshold = 5;
                   };
+                  args = [
+                    "--v=2"
+                    "--logtostderr"
+                    "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
+                    "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
+                  ];
                   ports = [
                     {
                       containerPort = 10054;
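
Since the kube-dns images are now exposed as options, they can be pinned or overridden from the configuration; the sketch below simply restates the defaults shipped by this module:

  services.kubernetes.addons.dns = {
    enable = true;
    clusterDomain = "cluster.local";
    # The attribute set is passed straight to pkgs.dockerTools.pullImage.
    kube-dns = {
      imageName = "k8s.gcr.io/k8s-dns-kube-dns-amd64";
      imageDigest = "sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8";
      finalImageTag = "1.14.10";
      sha256 = "0x583znk9smqn0fix7ld8sm5jgaxhqhx3fq97b1wkqm7iwhvl3pj";
    };
  };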
diff --git a/nixos/modules/services/computing/slurm/slurm.nix b/nixos/modules/services/computing/slurm/slurm.nix
index 45d34f5b76f5..1e1c5bc9f035 100644
--- a/nixos/modules/services/computing/slurm/slurm.nix
+++ b/nixos/modules/services/computing/slurm/slurm.nix
@@ -6,20 +6,36 @@ let
 
   cfg = config.services.slurm;
   # configuration file can be generated by http://slurm.schedmd.com/configurator.html
-  configFile = pkgs.writeText "slurm.conf"
+  configFile = pkgs.writeTextDir "slurm.conf"
     ''
       ${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
       ${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
       ${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''}
       ${optionalString (cfg.partitionName != null) ''partitionName=${cfg.partitionName}''}
       PlugStackConfig=${plugStackConfig}
+      ProctrackType=${cfg.procTrackType}
       ${cfg.extraConfig}
     '';
 
-  plugStackConfig = pkgs.writeText "plugstack.conf"
+  plugStackConfig = pkgs.writeTextDir "plugstack.conf"
     ''
       ${optionalString cfg.enableSrunX11 ''optional ${pkgs.slurm-spank-x11}/lib/x11.so''}
+      ${cfg.extraPlugstackConfig}
     '';
+
+
+  cgroupConfig = pkgs.writeTextDir "cgroup.conf"
+   ''
+     ${cfg.extraCgroupConfig}
+   '';
+
+  # slurm expects some additional config files to be
+  # in the same directory as slurm.conf
+  etcSlurm = pkgs.symlinkJoin {
+    name = "etc-slurm";
+    paths = [ configFile cgroupConfig plugStackConfig ];
+  };
+
 in
 
 {
@@ -31,13 +47,31 @@ in
     services.slurm = {
 
       server = {
-        enable = mkEnableOption "slurm control daemon";
-
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = ''
+            Whether to enable the slurm control daemon.
+            Note that the standard authentication method is "munge".
+            The "munge" service needs to be provided with a password file in order for
+            slurm to work properly (see <literal>services.munge.password</literal>).
+          '';
+        };
       };
 
       client = {
-        enable = mkEnableOption "slurm rlient daemon";
+        enable = mkEnableOption "slurm client daemon";
+      };
 
+      enableStools = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to provide a slurm.conf file.
+          Enable this option if you do not run a slurm daemon on this host
+          (i.e. <literal>server.enable</literal> and <literal>client.enable</literal> are <literal>false</literal>)
+          but you still want to run slurm commands from this host.
+        '';
       };
 
       package = mkOption {
@@ -88,7 +122,7 @@ in
         example = "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP";
         description = ''
           Name by which the partition may be referenced. Note that now you have
-          to write patrition's parameters after the name.
+          to write the partition's parameters after the name.
         '';
       };
 
@@ -98,8 +132,20 @@ in
         description = ''
           If enabled srun will accept the option "--x11" to allow for X11 forwarding
           from within an interactive session or a batch job. This activates the
-          slurm-spank-x11 module. Note that this requires 'services.openssh.forwardX11'
-          to be enabled on the compute nodes.
+          slurm-spank-x11 module. Note that this option also enables
+          'services.openssh.forwardX11' on the client.
+
+          This option requires slurm to be compiled without native X11 support.
+        '';
+      };
+
+      procTrackType = mkOption {
+        type = types.string;
+        default = "proctrack/linuxproc";
+        description = ''
+          Plugin to be used for process tracking on a job step basis.
+          The slurmd daemon uses this mechanism to identify all processes
+          which are children of processes it spawns for a user job step.
         '';
       };
 
@@ -111,6 +157,23 @@ in
           the end of the slurm configuration file.
         '';
       };
+
+      extraPlugstackConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          Extra configuration that will be added to the end of <literal>plugstack.conf</literal>.
+        '';
+      };
+
+      extraCgroupConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = ''
+          Extra configuration for <literal>cgroup.conf</literal>. This file is
+          used when <literal>procTrackType=proctrack/cgroup</literal>.
+        '';
+      };
     };
 
   };
@@ -123,8 +186,6 @@ in
       wrappedSlurm = pkgs.stdenv.mkDerivation {
         name = "wrappedSlurm";
 
-        propagatedBuildInputs = [ cfg.package configFile ];
-
         builder = pkgs.writeText "builder.sh" ''
           source $stdenv/setup
           mkdir -p $out/bin
@@ -136,20 +197,25 @@ in
           #!/bin/sh
           if [ -z "$SLURM_CONF" ]
           then
-            SLURM_CONF="${configFile}" "$EXE" "\$@"
+            SLURM_CONF="${etcSlurm}/slurm.conf" "$EXE" "\$@"
           else
             "$EXE" "\$0"
           fi
           EOT
             chmod +x "$wrappername"
           done
+
+          mkdir -p $out/share
+          ln -s ${getBin cfg.package}/share/man $out/share/man
         '';
       };
 
-  in mkIf (cfg.client.enable || cfg.server.enable) {
+  in mkIf (cfg.enableStools || cfg.client.enable || cfg.server.enable) {
 
     environment.systemPackages = [ wrappedSlurm ];
 
+    services.munge.enable = mkDefault true;
+
     systemd.services.slurmd = mkIf (cfg.client.enable) {
       path = with pkgs; [ wrappedSlurm coreutils ]
         ++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
@@ -169,6 +235,8 @@ in
       '';
     };
 
+    services.openssh.forwardX11 = mkIf cfg.client.enable (mkDefault true);
+
     systemd.services.slurmctld = mkIf (cfg.server.enable) {
       path = with pkgs; [ wrappedSlurm munge coreutils ]
         ++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
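
A hedged sketch of the new slurm options on a submit-only host: enableStools provides slurm.conf and the client tools without running slurmd or slurmctld, while procTrackType and extraCgroupConfig wire up cgroup-based process tracking. Hostnames and node/partition definitions are placeholders:

  services.slurm = {
    enableStools = true;
    controlMachine = "slurm-ctl";
    nodeName = "node[1-4] CPUs=8 State=UNKNOWN";
    partitionName = "debug Nodes=node[1-4] Default=YES MaxTime=INFINITE State=UP";
    procTrackType = "proctrack/cgroup";
    extraCgroupConfig = ''
      CgroupAutomount=yes
    '';
  };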
diff --git a/nixos/modules/services/databases/mysql.nix b/nixos/modules/services/databases/mysql.nix
index 66c9330c3550..15b9c788e872 100644
--- a/nixos/modules/services/databases/mysql.nix
+++ b/nixos/modules/services/databases/mysql.nix
@@ -231,8 +231,10 @@ in
 
     environment.systemPackages = [mysql];
 
-    systemd.services.mysql =
-      { description = "MySQL Server";
+    systemd.services.mysql = let
+      hasNotify = (cfg.package == pkgs.mariadb);
+    in {
+        description = "MySQL Server";
 
         after = [ "network.target" ];
         wantedBy = [ "multi-user.target" ];
@@ -256,17 +258,16 @@ in
 
             mkdir -m 0755 -p ${cfg.pidDir}
             chown -R ${cfg.user} ${cfg.pidDir}
-
-            # Make the socket directory
-            mkdir -p /run/mysqld
-            chmod 0755 /run/mysqld
-            chown -R ${cfg.user} /run/mysqld
           '';
 
-        serviceConfig.ExecStart = "${mysql}/bin/mysqld --defaults-extra-file=${myCnf} ${mysqldOptions}";
+        serviceConfig = {
+          Type = if hasNotify then "notify" else "simple";
+          RuntimeDirectory = "mysqld";
+          ExecStart = "${mysql}/bin/mysqld --defaults-extra-file=${myCnf} ${mysqldOptions}";
+        };
 
-        postStart =
-          ''
+        postStart = ''
+          ${lib.optionalString (!hasNotify) ''
             # Wait until the MySQL server is available for use
             count=0
             while [ ! -e /run/mysqld/mysqld.sock ]
@@ -281,6 +282,7 @@ in
                 count=$((count++))
                 sleep 1
             done
+          ''}
 
             if [ -f /tmp/mysql_init ]
             then
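
One practical consequence of the change above, sketched for reference: selecting MariaDB as the package makes the unit use Type=notify, so the socket-polling loop in postStart is skipped entirely.

  services.mysql = {
    enable = true;
    package = pkgs.mariadb;   # hasNotify is true only for MariaDB
  };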
diff --git a/nixos/modules/services/databases/pgmanage.nix b/nixos/modules/services/databases/pgmanage.nix
index d1b48c06440e..1a34c7f5ecee 100644
--- a/nixos/modules/services/databases/pgmanage.nix
+++ b/nixos/modules/services/databases/pgmanage.nix
@@ -41,7 +41,9 @@ let
 
   pgmanage = "pgmanage";
 
-  pgmanageOptions = {
+in {
+
+  options.services.pgmanage = {
     enable = mkEnableOption "PostgreSQL Administration for the web";
 
     package = mkOption {
@@ -176,47 +178,29 @@ let
     };
   };
 
-
-in {
-
-  options.services.pgmanage = pgmanageOptions;
-
-  # This is deprecated and should be removed for NixOS-18.03.
-  options.services.postage = pgmanageOptions;
-
-  config = mkMerge [
-    { assertions = [
-        { assertion = !config.services.postage.enable;
-          message =
-            "services.postage is deprecated in favour of pgmanage. " +
-            "They have the same options so just substitute postage for pgmanage." ;
-        }
-      ];
-    }
-    (mkIf cfg.enable {
-      systemd.services.pgmanage = {
-        description = "pgmanage - PostgreSQL Administration for the web";
-        wants    = [ "postgresql.service" ];
-        after    = [ "postgresql.service" ];
-        wantedBy = [ "multi-user.target" ];
-        serviceConfig = {
-          User         = pgmanage;
-          Group        = pgmanage;
-          ExecStart    = "${pkgs.pgmanage}/sbin/pgmanage -c ${confFile}" +
-                         optionalString cfg.localOnly " --local-only=true";
-        };
+  config = mkIf cfg.enable {
+    systemd.services.pgmanage = {
+      description = "pgmanage - PostgreSQL Administration for the web";
+      wants    = [ "postgresql.service" ];
+      after    = [ "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User         = pgmanage;
+        Group        = pgmanage;
+        ExecStart    = "${pkgs.pgmanage}/sbin/pgmanage -c ${confFile}" +
+                       optionalString cfg.localOnly " --local-only=true";
       };
-      users = {
-        users."${pgmanage}" = {
-          name  = pgmanage;
-          group = pgmanage;
-          home  = cfg.sqlRoot;
-          createHome = true;
-        };
-        groups."${pgmanage}" = {
-          name = pgmanage;
-        };
+    };
+    users = {
+      users."${pgmanage}" = {
+        name  = pgmanage;
+        group = pgmanage;
+        home  = cfg.sqlRoot;
+        createHome = true;
       };
-    })
-  ];
+      groups."${pgmanage}" = {
+        name = pgmanage;
+      };
+    };
+  };
 }
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index 4ad4728ccda6..42d61fa1b368 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -24,14 +24,13 @@ let
 
   postgresql = postgresqlAndPlugins cfg.package;
 
-  flags = optional cfg.enableTCPIP "-i";
-
   # The main PostgreSQL configuration file.
   configFile = pkgs.writeText "postgresql.conf"
     ''
       hba_file = '${pkgs.writeText "pg_hba.conf" cfg.authentication}'
       ident_file = '${pkgs.writeText "pg_ident.conf" cfg.identMap}'
       log_destination = 'stderr'
+      listen_addresses = '${if cfg.enableTCPIP then "*" else "localhost"}'
       port = ${toString cfg.port}
       ${cfg.extraConfig}
     '';
@@ -229,7 +228,7 @@ in
                 "${cfg.dataDir}/recovery.conf"
             ''}
 
-             exec postgres ${toString flags}
+             exec postgres
           '';
 
         serviceConfig =
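
With the -i flag gone, TCP/IP listening is now controlled entirely through postgresql.conf; a minimal sketch:

  services.postgresql = {
    enable = true;
    # true -> listen_addresses = '*', false -> 'localhost'
    enableTCPIP = true;
  };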
diff --git a/nixos/modules/services/desktops/flatpak.nix b/nixos/modules/services/desktops/flatpak.nix
index 024dc65629a8..cfca1893bd82 100644
--- a/nixos/modules/services/desktops/flatpak.nix
+++ b/nixos/modules/services/desktops/flatpak.nix
@@ -40,12 +40,12 @@ in {
 
     systemd.packages = [ pkgs.flatpak pkgs.xdg-desktop-portal ] ++ cfg.extraPortals;
 
-    environment.variables = {
-      PATH = [
-        "$HOME/.local/share/flatpak/exports/bin"
-        "/var/lib/flatpak/exports/bin"
-      ];
+    environment.profiles = [
+      "$HOME/.local/share/flatpak/exports"
+      "/var/lib/flatpak/exports"
+    ];
 
+    environment.variables = {
       XDG_DESKTOP_PORTAL_PATH = map (p: "${p}/share/xdg-desktop-portal/portals") cfg.extraPortals;
     };
   };
diff --git a/nixos/modules/services/development/bloop.nix b/nixos/modules/services/development/bloop.nix
new file mode 100644
index 000000000000..56904b7c40e6
--- /dev/null
+++ b/nixos/modules/services/development/bloop.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.bloop;
+
+in {
+
+  options.services.bloop = {
+    install = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to install a user service for the Bloop server.
+
+        The service must be manually started for each user with
+        "systemctl --user start bloop".
+      '';
+    };
+  };
+
+  config = mkIf (cfg.install) {
+    systemd.user.services.bloop = {
+      description = "Bloop Scala build server";
+
+      serviceConfig = {
+        Type      = "simple";
+        ExecStart = ''${pkgs.bloop}/bin/blp-server'';
+        Restart   = "always";
+      };
+    };
+
+    environment.systemPackages = [ pkgs.bloop ];
+  };
+}
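
Usage sketch for the new module; the service is per-user, so after enabling it each user still starts it with "systemctl --user start bloop":

  services.bloop.install = true;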
diff --git a/nixos/modules/services/logging/journaldriver.nix b/nixos/modules/services/logging/journaldriver.nix
new file mode 100644
index 000000000000..74ac3d4c2365
--- /dev/null
+++ b/nixos/modules/services/logging/journaldriver.nix
@@ -0,0 +1,112 @@
+# This module implements a systemd service for running journaldriver,
+# a log forwarding agent that sends logs from journald to Stackdriver
+# Logging.
+#
+# It can be enabled without extra configuration when running on GCP.
+# On machines hosted elsewhere, the other configuration options need
+# to be set.
+#
+# For further information please consult the documentation in the
+# upstream repository at: https://github.com/aprilabank/journaldriver/
+
+{ config, lib, pkgs, ...}:
+
+with lib; let cfg = config.services.journaldriver;
+in {
+  options.services.journaldriver = {
+    enable = mkOption {
+      type        = types.bool;
+      default     = false;
+      description = ''
+        Whether to enable journaldriver to forward journald logs to
+        Stackdriver Logging.
+      '';
+    };
+
+    logLevel = mkOption {
+      type        = types.str;
+      default     = "info";
+      description = ''
+        Log level at which journaldriver logs its own output.
+      '';
+    };
+
+    logName = mkOption {
+      type        = with types; nullOr str;
+      default     = null;
+      description = ''
+        Configures the name of the target log in Stackdriver Logging.
+        This option can be set to, for example, the hostname of a
+        machine to improve the user experience in the logging
+        overview.
+      '';
+    };
+
+    googleCloudProject = mkOption {
+      type        = with types; nullOr str;
+      default     = null;
+      description = ''
+        Configures the name of the Google Cloud project to which to
+        forward journald logs.
+
+        This option is required on non-GCP machines, but should not be
+        set on GCP instances.
+      '';
+    };
+
+    logStream = mkOption {
+      type        = with types; nullOr str;
+      default     = null;
+      description = ''
+        Configures the name of the Stackdriver Logging log stream into
+        which to write journald entries.
+
+        This option is required on non-GCP machines, but should not be
+        set on GCP instances.
+      '';
+    };
+
+    applicationCredentials = mkOption {
+      type        = with types; nullOr path;
+      default     = null;
+      description = ''
+        Path to the service account private key (in JSON-format) used
+        to forward log entries to Stackdriver Logging on non-GCP
+        instances.
+
+        This option is required on non-GCP machines, but should not be
+        set on GCP instances.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.journaldriver = {
+      description = "Stackdriver Logging journal forwarder";
+      script      = "${pkgs.journaldriver}/bin/journaldriver";
+      after       = [ "network-online.target" ];
+      wantedBy    = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Restart        = "always";
+        DynamicUser    = true;
+
+        # This directive lets systemd automatically configure
+        # permissions on /var/lib/journaldriver, the directory in
+        # which journaldriver persists its cursor state.
+        StateDirectory = "journaldriver";
+
+        # This group is required for accessing journald.
+        SupplementaryGroups = "systemd-journal";
+      };
+
+      environment = {
+        RUST_LOG                       = cfg.logLevel;
+        LOG_NAME                       = cfg.logName;
+        LOG_STREAM                     = cfg.logStream;
+        GOOGLE_CLOUD_PROJECT           = cfg.googleCloudProject;
+        GOOGLE_APPLICATION_CREDENTIALS = cfg.applicationCredentials;
+      };
+    };
+  };
+}
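
A hedged configuration sketch for a machine hosted outside GCP; every value is a placeholder and the service-account key file has to be provisioned out of band:

  services.journaldriver = {
    enable                 = true;
    logLevel               = "info";
    logName                = "my-host";
    googleCloudProject     = "my-gcp-project";
    logStream              = "production";
    applicationCredentials = "/etc/gcp/log-writer-key.json";
  };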
diff --git a/nixos/modules/services/mail/exim.nix b/nixos/modules/services/mail/exim.nix
index 440eae281f40..f9ee3f909660 100644
--- a/nixos/modules/services/mail/exim.nix
+++ b/nixos/modules/services/mail/exim.nix
@@ -94,6 +94,7 @@ in
     systemd.services.exim = {
       description = "Exim Mail Daemon";
       wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ config.environment.etc."exim.conf".source ];
       serviceConfig = {
         ExecStart   = "${exim}/bin/exim -bdf -q30m";
         ExecReload  = "${coreutils}/bin/kill -HUP $MAINPID";
diff --git a/nixos/modules/services/mail/opensmtpd.nix b/nixos/modules/services/mail/opensmtpd.nix
index 53acdba42457..f9b890532ceb 100644
--- a/nixos/modules/services/mail/opensmtpd.nix
+++ b/nixos/modules/services/mail/opensmtpd.nix
@@ -10,7 +10,7 @@ let
 
   sendmail = pkgs.runCommand "opensmtpd-sendmail" {} ''
     mkdir -p $out/bin
-    ln -s ${pkgs.opensmtpd}/sbin/smtpctl $out/bin/sendmail
+    ln -s ${cfg.package}/sbin/smtpctl $out/bin/sendmail
   '';
 
 in {
@@ -27,6 +27,13 @@ in {
         description = "Whether to enable the OpenSMTPD server.";
       };
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.opensmtpd;
+        defaultText = "pkgs.opensmtpd";
+        description = "The OpenSMTPD package to use.";
+      };
+
       addSendmailToSystemPath = mkOption {
         type = types.bool;
         default = true;
@@ -97,7 +104,7 @@ in {
     systemd.services.opensmtpd = let
       procEnv = pkgs.buildEnv {
         name = "opensmtpd-procs";
-        paths = [ pkgs.opensmtpd ] ++ cfg.procPackages;
+        paths = [ cfg.package ] ++ cfg.procPackages;
         pathsToLink = [ "/libexec/opensmtpd" ];
       };
     in {
@@ -115,7 +122,7 @@ in {
         chown smtpq.root /var/spool/smtpd/purge
         chmod 700 /var/spool/smtpd/purge
       '';
-      serviceConfig.ExecStart = "${pkgs.opensmtpd}/sbin/smtpd -d -f ${conf} ${args}";
+      serviceConfig.ExecStart = "${cfg.package}/sbin/smtpd -d -f ${conf} ${args}";
       environment.OPENSMTPD_PROC_PATH = "${procEnv}/libexec/opensmtpd";
     };
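
Sketch of the new package option; pkgs.opensmtpd remains the default, so setting it is only needed to substitute a customised build:

  services.opensmtpd = {
    enable = true;
    package = pkgs.opensmtpd;   # or an overridden/patched derivation
  };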
 
diff --git a/nixos/modules/services/misc/docker-registry.nix b/nixos/modules/services/misc/docker-registry.nix
index 45931cb42b54..f628da4ac4c0 100644
--- a/nixos/modules/services/misc/docker-registry.nix
+++ b/nixos/modules/services/misc/docker-registry.nix
@@ -42,7 +42,7 @@ let
     };
   };
 
-  configFile = pkgs.writeText "docker-registry-config.yml" (builtins.toJSON (registryConfig // cfg.extraConfig));
+  configFile = pkgs.writeText "docker-registry-config.yml" (builtins.toJSON (recursiveUpdate registryConfig cfg.extraConfig));
 
 in {
   options.services.dockerRegistry = {
@@ -91,7 +91,7 @@ in {
         Docker extra registry configuration via environment variables.
       '';
       default = {};
-      type = types.attrsOf types.str;
+      type = types.attrs;
     };
 
     enableGarbageCollect = mkEnableOption "garbage collect";
@@ -120,6 +120,7 @@ in {
       serviceConfig = {
         User = "docker-registry";
         WorkingDirectory = cfg.storagePath;
+        AmbientCapabilities = mkIf (cfg.port < 1024) "cap_net_bind_service";
       };
     };
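
Because extraConfig is now a free-form attribute set merged with recursiveUpdate, nested registry settings can be overridden without clobbering the generated defaults; the TLS paths below are purely illustrative:

  services.dockerRegistry = {
    enable = true;
    port = 443;   # ports below 1024 work now that cap_net_bind_service is granted
    extraConfig = {
      http.tls = {
        certificate = "/var/lib/docker-registry/cert.pem";
        key         = "/var/lib/docker-registry/key.pem";
      };
    };
  };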
 
diff --git a/nixos/modules/services/misc/gitea.nix b/nixos/modules/services/misc/gitea.nix
index 2d0f66de037d..46efc1df12eb 100644
--- a/nixos/modules/services/misc/gitea.nix
+++ b/nixos/modules/services/misc/gitea.nix
@@ -282,7 +282,7 @@ in
 
         mkdir -p ${cfg.repositoryRoot}
         # update all hooks' binary paths
-        HOOKS=$(find ${cfg.repositoryRoot} -mindepth 4 -maxdepth 4 -type f -wholename "*git/hooks/*")
+        HOOKS=$(find ${cfg.repositoryRoot} -mindepth 4 -maxdepth 5 -type f -wholename "*git/hooks/*")
         if [ "$HOOKS" ]
         then
           sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gitea,${gitea.bin}/bin/gitea,g' $HOOKS
@@ -295,6 +295,11 @@ in
           mkdir -p ${cfg.stateDir}/conf
           cp -r ${gitea.out}/locale ${cfg.stateDir}/conf/locale
         fi
+        # update command option in authorized_keys
+        if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
+        then
+          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gitea,${gitea.bin}/bin/gitea,g' ${cfg.stateDir}/.ssh/authorized_keys
+        fi
       '' + optionalString (usePostgresql && cfg.database.createDatabase) ''
         if ! test -e "${cfg.stateDir}/db-created"; then
           echo "CREATE ROLE ${cfg.database.user}
@@ -351,7 +356,7 @@ in
         text = cfg.database.password;
       })));
 
-    systemd.services.gitea-dump = {
+    systemd.services.gitea-dump = mkIf cfg.dump.enable {
        description = "gitea dump";
        after = [ "gitea.service" ];
        wantedBy = [ "default.target" ];
@@ -371,7 +376,7 @@ in
        };
     };
 
-    systemd.timers.gitea-dump = {
+    systemd.timers.gitea-dump = mkIf cfg.dump.enable {
       description = "Update timer for gitea-dump";
       partOf = [ "gitea-dump.service" ];
       wantedBy = [ "timers.target" ];
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index be13fed860bd..e80abf96da48 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -129,6 +129,7 @@ let
         };
       };
       extra = {};
+      uploads.storage_path = cfg.statePath;
     };
   };
 
@@ -565,13 +566,9 @@ in {
 
         ${pkgs.openssl}/bin/openssl rand -hex 32 > ${cfg.statePath}/config/gitlab_shell_secret
 
-        # The uploads directory is hardcoded somewhere deep in rails. It is
-        # symlinked in the gitlab package to /run/gitlab/uploads to make it
-        # configurable
         mkdir -p /run/gitlab
-        mkdir -p ${cfg.statePath}/{log,uploads}
+        mkdir -p ${cfg.statePath}/log
         ln -sf ${cfg.statePath}/log /run/gitlab/log
-        ln -sf ${cfg.statePath}/uploads /run/gitlab/uploads
         ln -sf ${cfg.statePath}/tmp /run/gitlab/tmp
         ln -sf $GITLAB_SHELL_CONFIG_PATH /run/gitlab/shell-config.yml
         chown -R ${cfg.user}:${cfg.group} /run/gitlab
@@ -587,6 +584,8 @@ in {
           ln -sf ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
         ''}
         ln -sf ${cfg.statePath}/config /run/gitlab/config
+        rm ${cfg.statePath}/lib
+        ln -sf ${pkgs.gitlab}/share/gitlab/lib ${cfg.statePath}/lib
         cp ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
 
         # JSON is a subset of YAML
@@ -638,10 +637,6 @@ in {
         chmod -R ug+rwX,o-rwx ${cfg.statePath}/repositories
         chmod -R ug-s ${cfg.statePath}/repositories
         find ${cfg.statePath}/repositories -type d -print0 | xargs -0 chmod g+s
-        chmod 770 ${cfg.statePath}/uploads
-        chown -R ${cfg.user} ${cfg.statePath}/uploads
-        find ${cfg.statePath}/uploads -type f -exec chmod 0644 {} \;
-        find ${cfg.statePath}/uploads -type d -not -path ${cfg.statePath}/uploads -exec chmod 0770 {} \;
       '';
 
       serviceConfig = {
diff --git a/nixos/modules/services/misc/home-assistant.nix b/nixos/modules/services/misc/home-assistant.nix
index 1dc7b44ee37b..05555353f207 100644
--- a/nixos/modules/services/misc/home-assistant.nix
+++ b/nixos/modules/services/misc/home-assistant.nix
@@ -128,9 +128,17 @@ in {
         you might need to specify it in <literal>extraPackages</literal>.
       '';
     };
+
+    openFirewall = mkOption {
+      default = false;
+      type = types.bool;
+      description = "Whether to open the firewall for the specified port.";
+    };
   };
 
   config = mkIf cfg.enable {
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
     systemd.services.home-assistant = {
       description = "Home Assistant";
       after = [ "network.target" ];
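
Minimal sketch of the new switch; it opens whatever services.home-assistant.port is set to (8123 by default) in the NixOS firewall:

  services.home-assistant = {
    enable = true;
    openFirewall = true;
  };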
diff --git a/nixos/modules/services/misc/nix-daemon.nix b/nixos/modules/services/misc/nix-daemon.nix
index 8b940d71ebee..0ee105e4c6f1 100644
--- a/nixos/modules/services/misc/nix-daemon.nix
+++ b/nixos/modules/services/misc/nix-daemon.nix
@@ -33,7 +33,7 @@ let
       sh = pkgs.runtimeShell;
       binshDeps = pkgs.writeReferencesToFile sh;
     in
-      pkgs.runCommand "nix.conf" { extraOptions = cfg.extraOptions; } ''
+      pkgs.runCommand "nix.conf" { extraOptions = cfg.extraOptions; } (''
         ${optionalString (!isNix20) ''
           extraPaths=$(for i in $(cat ${binshDeps}); do if test -d $i; then echo $i; fi; done)
         ''}
@@ -62,7 +62,11 @@ let
         ''}
         $extraOptions
         END
-      '';
+      '' + optionalString cfg.checkConfig ''
+        echo "Checking that Nix can read nix.conf..."
+        ln -s $out ./nix.conf
+        NIX_CONF_DIR=$PWD ${cfg.package}/bin/nix show-config >/dev/null
+      '');
 
 in
 
@@ -126,11 +130,13 @@ in
         default = false;
         description = "
           If set, Nix will perform builds in a sandboxed environment that it
-          will set up automatically for each build.  This prevents
-          impurities in builds by disallowing access to dependencies
-          outside of the Nix store. This isn't enabled by default for
-          performance. It doesn't affect derivation hashes, so changing
-          this option will not trigger a rebuild of packages.
+          will set up automatically for each build. This prevents impurities
+          in builds by disallowing access to dependencies outside the Nix
+          store, using network and mount namespaces in a chroot environment.
+          This isn't enabled by default because the initial setup of a sandbox
+          for each build adds some overhead. It doesn't affect derivation
+          hashes, so changing this option will not trigger a rebuild of
+          packages.
         ";
       };
 
@@ -351,6 +357,13 @@ in
         '';
       };
 
+      checkConfig = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          If enabled (the default), checks that Nix can parse the generated nix.conf.
+        '';
+      };
     };
 
   };
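
The check can be opted out of when the generated nix.conf intentionally contains options that the build-time "nix show-config" run cannot parse; a one-line sketch:

  nix.checkConfig = false;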
diff --git a/nixos/modules/services/misc/nixos-manual.nix b/nixos/modules/services/misc/nixos-manual.nix
index 4bd1c20edf71..3916c3052e8b 100644
--- a/nixos/modules/services/misc/nixos-manual.nix
+++ b/nixos/modules/services/misc/nixos-manual.nix
@@ -99,7 +99,7 @@ in
 
     services.nixosManual.browser = mkOption {
       type = types.path;
-      default = "${pkgs.w3m-nox}/bin/w3m";
+      default = "${pkgs.w3m-nographics}/bin/w3m";
       description = ''
         Browser used to show the manual.
       '';
diff --git a/nixos/modules/services/misc/xmr-stak.nix b/nixos/modules/services/misc/xmr-stak.nix
index 57f439365471..a87878c31e0d 100644
--- a/nixos/modules/services/misc/xmr-stak.nix
+++ b/nixos/modules/services/misc/xmr-stak.nix
@@ -10,9 +10,6 @@ let
     inherit (cfg) openclSupport cudaSupport;
   };
 
-  xmrConfArg = optionalString (cfg.configText != "") ("-c " +
-    pkgs.writeText "xmr-stak-config.txt" cfg.configText);
-
 in
 
 {
@@ -29,22 +26,34 @@ in
         description = "List of parameters to pass to xmr-stak.";
       };
 
-      configText = mkOption {
-        type = types.lines;
-        default = "";
-        example = ''
-          "currency" : "monero",
-          "pool_list" :
-            [ { "pool_address" : "pool.supportxmr.com:5555",
-                "wallet_address" : "<long-hash>",
-                "pool_password" : "minername",
-                "pool_weight" : 1,
-              },
-            ],
+      configFiles = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        example = literalExample ''
+          {
+            "config.txt" = '''
+              "verbose_level" : 4,
+              "h_print_time" : 60,
+              "tls_secure_algo" : true,
+            ''';
+            "pools.txt" = '''
+              "currency" : "monero7",
+              "pool_list" :
+              [ { "pool_address" : "pool.supportxmr.com:443",
+                  "wallet_address" : "my-wallet-address",
+                  "rig_id" : "",
+                  "pool_password" : "nixos",
+                  "use_nicehash" : false,
+                  "use_tls" : true,
+                  "tls_fingerprint" : "",
+                  "pool_weight" : 23
+                },
+              ],
+            ''';
+          }
         '';
         description = ''
-          Verbatim xmr-stak config.txt. If empty, the <literal>-c</literal>
-          parameter will not be added to the xmr-stak command.
+          Content of config files like config.txt, pools.txt or cpu.txt.
         '';
       };
     };
@@ -58,10 +67,13 @@ in
       environment = mkIf cfg.cudaSupport {
         LD_LIBRARY_PATH = "${pkgs.linuxPackages_latest.nvidia_x11}/lib";
       };
-      script = ''
-        exec ${pkg}/bin/xmr-stak ${xmrConfArg} ${concatStringsSep " " cfg.extraArgs}
-      '';
+
+      preStart = concatStrings (flip mapAttrsToList cfg.configFiles (fn: content: ''
+        ln -sf '${pkgs.writeText "xmr-stak-${fn}" content}' '${fn}'
+      ''));
+
       serviceConfig = let rootRequired = cfg.openclSupport || cfg.cudaSupport; in {
+        ExecStart = "${pkg}/bin/xmr-stak ${concatStringsSep " " cfg.extraArgs}";
         # xmr-stak generates cpu and/or gpu configuration files
         WorkingDirectory = "/tmp";
         PrivateTmp = true;
@@ -70,4 +82,12 @@ in
       };
     };
   };
+
+  imports = [
+    (mkRemovedOptionModule ["services" "xmr-stak" "configText"] ''
+      This option was removed in favour of `services.xmr-stak.configFiles`
+      because the new config file `pools.txt` was introduced. You are
+      now able to define all other config files like cpu.txt or amd.txt.
+    '')
+  ];
 }
diff --git a/nixos/modules/services/monitoring/dd-agent/dd-agent.nix b/nixos/modules/services/monitoring/dd-agent/dd-agent.nix
index beaa2c01b298..6367c8245f71 100644
--- a/nixos/modules/services/monitoring/dd-agent/dd-agent.nix
+++ b/nixos/modules/services/monitoring/dd-agent/dd-agent.nix
@@ -57,7 +57,7 @@ let
     instances:
       - use_mount: no
   '';
-  
+
   networkConfig = pkgs.writeText "network.yaml" ''
     init_config:
 
@@ -68,13 +68,13 @@ let
           - lo
           - lo0
   '';
-  
+
   postgresqlConfig = pkgs.writeText "postgres.yaml" cfg.postgresqlConfig;
   nginxConfig = pkgs.writeText "nginx.yaml" cfg.nginxConfig;
   mongoConfig = pkgs.writeText "mongo.yaml" cfg.mongoConfig;
   jmxConfig = pkgs.writeText "jmx.yaml" cfg.jmxConfig;
   processConfig = pkgs.writeText "process.yaml" cfg.processConfig;
-  
+
   etcfiles =
     let
       defaultConfd = import ./dd-agent-defaults.nix;
@@ -150,7 +150,7 @@ in {
       default = null;
       type = types.uniq (types.nullOr types.string);
     };
-    
+
     mongoConfig = mkOption {
       description = "MongoDB integration configuration";
       default = null;
@@ -166,7 +166,7 @@ in {
     processConfig = mkOption {
       description = ''
         Process integration configuration
- 
+
         See http://docs.datadoghq.com/integrations/process/
       '';
       default = null;
@@ -190,7 +190,7 @@ in {
 
     systemd.services.dd-agent = {
       description = "Datadog agent monitor";
-      path = [ pkgs."dd-agent" pkgs.python pkgs.sysstat pkgs.procps ];
+      path = [ pkgs."dd-agent" pkgs.python pkgs.sysstat pkgs.procps pkgs.gohai ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         ExecStart = "${pkgs.dd-agent}/bin/dd-agent foreground";
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index 780448d8bad8..8d2c303a69e8 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -20,6 +20,7 @@ let
   exporterOpts = {
     blackbox  = import ./exporters/blackbox.nix  { inherit config lib pkgs; };
     collectd  = import ./exporters/collectd.nix  { inherit config lib pkgs; };
+    dnsmasq   = import ./exporters/dnsmasq.nix   { inherit config lib pkgs; };
     dovecot   = import ./exporters/dovecot.nix   { inherit config lib pkgs; };
     fritzbox  = import ./exporters/fritzbox.nix  { inherit config lib pkgs; };
     json      = import ./exporters/json.nix      { inherit config lib pkgs; };
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix b/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
new file mode 100644
index 000000000000..b1fab85109af
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
@@ -0,0 +1,39 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.dnsmasq;
+in
+{
+  port = 9153;
+  extraOpts = {
+    dnsmasqListenAddress = mkOption {
+      type = types.str;
+      default = "localhost:53";
+      description = ''
+        Address on which dnsmasq listens.
+      '';
+    };
+    leasesPath = mkOption {
+      type = types.path;
+      default = "/var/lib/misc/dnsmasq.leases";
+      example = "/var/lib/dnsmasq/dnsmasq.leases";
+      description = ''
+        Path to the <literal>dnsmasq.leases</literal> file.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-dnsmasq-exporter}/bin/dnsmasq_exporter \
+          --listen ${cfg.listenAddress}:${toString cfg.port} \
+          --dnsmasq ${cfg.dnsmasqListenAddress} \
+          --leases_path ${cfg.leasesPath} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
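
Enabling the new exporter looks roughly like the sketch below; enable, port and listenAddress come from the shared exporter framework, the other two options from the extraOpts defined above:

  services.prometheus.exporters.dnsmasq = {
    enable = true;
    dnsmasqListenAddress = "localhost:53";
    leasesPath = "/var/lib/dnsmasq/dnsmasq.leases";
  };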
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/node.nix b/nixos/modules/services/monitoring/prometheus/exporters/node.nix
index c85f5f9cfb2d..ee7bf39f199a 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/node.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/node.nix
@@ -27,6 +27,7 @@ in
   };
   serviceOpts = {
     serviceConfig = {
+      RuntimeDirectory = "prometheus-node-exporter";
       ExecStart = ''
         ${pkgs.prometheus-node-exporter}/bin/node_exporter \
           ${concatMapStringsSep " " (x: "--collector." + x) cfg.enabledCollectors} \
diff --git a/nixos/modules/services/network-filesystems/beegfs.nix b/nixos/modules/services/network-filesystems/beegfs.nix
index a6a2ec6cbc36..182fabf6405f 100644
--- a/nixos/modules/services/network-filesystems/beegfs.nix
+++ b/nixos/modules/services/network-filesystems/beegfs.nix
@@ -31,7 +31,7 @@ let
     connPortShift = ${toString cfg.connPortShift}
     storeAllowFirstRunInit = false
 
-    ${cfg.mgmtd.extraConfig}
+    ${cfg.meta.extraConfig}
   '';
 
   configStorage = name: cfg: pkgs.writeText "storage-${name}.conf" ''
diff --git a/nixos/modules/services/network-filesystems/ipfs.nix b/nixos/modules/services/network-filesystems/ipfs.nix
index e2122ddb8ede..ab6d3a3d2fa4 100644
--- a/nixos/modules/services/network-filesystems/ipfs.nix
+++ b/nixos/modules/services/network-filesystems/ipfs.nix
@@ -186,6 +186,14 @@ in {
         default = [];
       };
 
+      localDiscovery = mkOption {
+        type = types.bool;
+        description = ''Whether to enable local discovery for the ipfs daemon.
+          This will allow ipfs to scan ports on your local network. Some hosting services will ban you if you do this.
+        '';
+        default = true;
+      };
+
       serviceFdlimit = mkOption {
         type = types.nullOr types.int;
         default = null;
@@ -232,7 +240,13 @@ in {
       '';
       script = ''
         if [[ ! -f ${cfg.dataDir}/config ]]; then
-          ipfs init ${optionalString cfg.emptyRepo "-e"}
+          ipfs init ${optionalString cfg.emptyRepo "-e"} \
+            ${optionalString (! cfg.localDiscovery) "--profile=server"}
+        else
+          ${if cfg.localDiscovery
+            then "ipfs config profile apply local-discovery"
+            else "ipfs config profile apply server"
+          }
         fi
       '';
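
As a sketch (services.ipfs.enable is assumed from elsewhere in this module), a public server would typically turn the new option off so the init/apply logic above selects the server profile:

    services.ipfs = {
      enable = true;           # assumed; defined elsewhere in this module
      localDiscovery = false;  # applies the "server" profile instead of "local-discovery"
    };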
 
diff --git a/nixos/modules/services/networking/bind.nix b/nixos/modules/services/networking/bind.nix
index 763283dfe7a2..7775a4bd87fe 100644
--- a/nixos/modules/services/networking/bind.nix
+++ b/nixos/modules/services/networking/bind.nix
@@ -27,6 +27,7 @@ let
         forwarders { ${concatMapStrings (entry: " ${entry}; ") cfg.forwarders} };
         directory "/var/run/named";
         pid-file "/var/run/named/named.pid";
+        ${cfg.extraOptions}
       };
 
       ${cfg.extraConfig}
@@ -141,6 +142,15 @@ in
         ";
       };
 
+      extraOptions = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra lines to be added verbatim to the options section of the
+          generated named configuration file.
+        '';
+      };
+
       configFile = mkOption {
         type = types.path;
         default = confFile;
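
An illustrative use of the new extraOptions knob (services.bind.enable is assumed from the rest of this module); the lines are inserted verbatim into the generated options { } block:

    services.bind = {
      enable = true;
      extraOptions = ''
        dnssec-validation auto;
        recursion no;
      '';
    };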
diff --git a/nixos/modules/services/networking/chrony.nix b/nixos/modules/services/networking/chrony.nix
index 9bf266b38054..c287ca01feb5 100644
--- a/nixos/modules/services/networking/chrony.nix
+++ b/nixos/modules/services/networking/chrony.nix
@@ -109,7 +109,7 @@ in
         home = stateDir;
       };
 
-    systemd.services.timesyncd.enable = mkForce false;
+    services.timesyncd.enable = mkForce false;
 
     systemd.services.chronyd =
       { description = "chrony NTP daemon";
diff --git a/nixos/modules/services/networking/dnscrypt-proxy.nix b/nixos/modules/services/networking/dnscrypt-proxy.nix
index 6f5e7d8d456e..8edcf925dbfa 100644
--- a/nixos/modules/services/networking/dnscrypt-proxy.nix
+++ b/nixos/modules/services/networking/dnscrypt-proxy.nix
@@ -145,6 +145,9 @@ in
       }
     ];
 
+    # make man 8 dnscrypt-proxy work
+    environment.systemPackages = [ pkgs.dnscrypt-proxy ];
+
     users.users.dnscrypt-proxy = {
       description = "dnscrypt-proxy daemon user";
       isSystemUser = true;
diff --git a/nixos/modules/services/networking/dnsdist.nix b/nixos/modules/services/networking/dnsdist.nix
new file mode 100644
index 000000000000..12eee136e639
--- /dev/null
+++ b/nixos/modules/services/networking/dnsdist.nix
@@ -0,0 +1,61 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dnsdist;
+  configFile = pkgs.writeText "dnsdist.conf" ''
+    setLocal('${cfg.listenAddress}:${toString cfg.listenPort}')
+    ${cfg.extraConfig}
+    '';
+in {
+  options = {
+    services.dnsdist = {
+      enable = mkEnableOption "dnsdist domain name server";
+
+      listenAddress = mkOption {
+        type = types.str;
+        description = "Listen IP Address";
+        default = "0.0.0.0";
+      };
+      listenPort = mkOption {
+        type = types.int;
+        description = "Listen port";
+        default = 53;
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = ''
+        '';
+        description = ''
+          Extra lines to be added verbatim to dnsdist.conf.
+        '';
+      };
+    };
+  };
+
+  config = mkIf config.services.dnsdist.enable {
+    systemd.services.dnsdist = {
+      description = "dnsdist load balancer";
+      wantedBy = [ "multi-user.target" ];
+      after = ["network.target"];
+
+      serviceConfig = {
+        Restart="on-failure";
+        RestartSec="1";
+        DynamicUser = true;
+        StartLimitInterval="0";
+        PrivateTmp=true;
+        PrivateDevices=true;
+        CapabilityBoundingSet="CAP_NET_BIND_SERVICE CAP_SETGID CAP_SETUID";
+        ExecStart = "${pkgs.dnsdist}/bin/dnsdist --supervised --disable-syslog --config ${configFile}";
+        ProtectSystem="full";
+        ProtectHome=true;
+        RestrictAddressFamilies="AF_UNIX AF_INET AF_INET6";
+        LimitNOFILE="16384";
+        TasksMax="8192";
+      };
+    };
+  };
+}
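
A minimal sketch of the new module in use; the extraConfig body is dnsdist's Lua configuration language and the upstream address is a placeholder:

    services.dnsdist = {
      enable = true;
      listenAddress = "127.0.0.1";
      listenPort = 5353;
      extraConfig = ''
        -- forward all queries to a placeholder upstream resolver
        newServer({address = "192.0.2.53:53"})
      '';
    };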
diff --git a/nixos/modules/services/networking/morty.nix b/nixos/modules/services/networking/morty.nix
new file mode 100644
index 000000000000..b31bec9a8627
--- /dev/null
+++ b/nixos/modules/services/networking/morty.nix
@@ -0,0 +1,98 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.morty;
+
+  configFile = cfg.configFile;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.morty = {
+
+      enable = mkEnableOption
+        "Morty proxy server. See https://github.com/asciimoo/morty";
+
+      ipv6 = mkOption {
+        type = types.bool;
+        default = true;
+        description = "Allow IPv6 HTTP requests?";
+        defaultText = "Allow IPv6 HTTP requests.";
+      };
+
+      key = mkOption {
+        type = types.string;
+        default = "";
+        description = "HMAC url validation key (hexadecimal encoded).
+	Without a validation key, anyone can submit proxy requests.
+	Leave blank to disable.";
+        defaultText = "No HMAC url validation. Generate with echo -n somevalue | openssl dgst -sha1 -hmac somekey";
+      };
+
+      timeout = mkOption {
+        type = types.int;
+        default = 2;
+        description = "Request timeout in seconds.";
+        defaultText = "A resource now gets 2 seconds to respond.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.morty;
+        defaultText = "pkgs.morty";
+        description = "morty package to use.";
+      };
+
+      port = mkOption {
+        type = types.int;
+        default = 3000;
+        description = "Listening port";
+      };
+
+      listenAddress = mkOption {
+        type = types.string;
+        default = "127.0.0.1";
+        description = "The address on which the service listens";
+        defaultText = "127.0.0.1 (localhost)";
+      };
+
+    };
+
+  };
+
+  ###### Service definition
+
+  config = mkIf config.services.morty.enable {
+
+    users.extraUsers.morty =
+      { description = "Morty user";
+        createHome = true;
+        home = "/var/lib/morty";
+      };
+
+    systemd.services.morty =
+      {
+        description = "Morty sanitizing proxy server.";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = "morty";
+          ExecStart = ''${cfg.package}/bin/morty              \
+	    -listen ${cfg.listenAddress}:${toString cfg.port} \
+	    ${optionalString cfg.ipv6 "-ipv6"}                \
+	    ${optionalString (cfg.key != "") "-key " + cfg.key} \
+	  '';
+        };
+      };
+    environment.systemPackages = [ cfg.package ];
+
+  };
+}
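
A configuration sketch for the new Morty module; the key is a placeholder generated as described in the option's documentation:

    services.morty = {
      enable = true;
      listenAddress = "127.0.0.1";
      port = 3000;
      timeout = 5;
      # placeholder hex-encoded HMAC key; generate your own
      key = "0123456789abcdef0123456789abcdef01234567";
    };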
diff --git a/nixos/modules/services/networking/nat.nix b/nixos/modules/services/networking/nat.nix
index da3827c35e63..89d8590093dd 100644
--- a/nixos/modules/services/networking/nat.nix
+++ b/nixos/modules/services/networking/nat.nix
@@ -38,19 +38,19 @@ let
     # NAT the marked packets.
     ${optionalString (cfg.internalInterfaces != []) ''
       iptables -w -t nat -A nixos-nat-post -m mark --mark 1 \
-        -o ${cfg.externalInterface} ${dest}
+        ${optionalString (cfg.externalInterface != null) "-o ${cfg.externalInterface}"} ${dest}
     ''}
 
     # NAT packets coming from the internal IPs.
     ${concatMapStrings (range: ''
       iptables -w -t nat -A nixos-nat-post \
-        -s '${range}' -o ${cfg.externalInterface} ${dest}
+        -s '${range}' ${optionalString (cfg.externalInterface != null) "-o ${cfg.externalInterface}"} ${dest}
     '') cfg.internalIPs}
 
     # NAT from external ports to internal ports.
     ${concatMapStrings (fwd: ''
       iptables -w -t nat -A nixos-nat-pre \
-        -i ${cfg.externalInterface} -p ${fwd.proto} \
+        -i ${toString cfg.externalInterface} -p ${fwd.proto} \
         --dport ${builtins.toString fwd.sourcePort} \
         -j DNAT --to-destination ${fwd.destination}
 
@@ -81,7 +81,7 @@ let
 
     ${optionalString (cfg.dmzHost != null) ''
       iptables -w -t nat -A nixos-nat-pre \
-        -i ${cfg.externalInterface} -j DNAT \
+        -i ${toString cfg.externalInterface} -j DNAT \
         --to-destination ${cfg.dmzHost}
     ''}
 
@@ -134,7 +134,8 @@ in
     };
 
     networking.nat.externalInterface = mkOption {
-      type = types.str;
+      type = types.nullOr types.str;
+      default = null;
       example = "eth1";
       description =
         ''
@@ -236,6 +237,15 @@ in
     { networking.firewall.extraCommands = mkBefore flushNat; }
     (mkIf config.networking.nat.enable {
 
+      assertions = [
+        { assertion = (cfg.dmzHost != null)    -> (cfg.externalInterface != null);
+          message = "networking.nat.dmzHost requires networking.nat.externalInterface";
+        }
+        { assertion = (cfg.forwardPorts != []) -> (cfg.externalInterface != null);
+          message = "networking.nat.forwardPorts requires networking.nat.externalInterface";
+        }
+      ];
+
       environment.systemPackages = [ pkgs.iptables ];
 
       boot = {
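
With externalInterface now optional, the new assertions still require it whenever forwardPorts or dmzHost are set; a setup that satisfies them could look like this (interface names and addresses are placeholders):

    networking.nat = {
      enable = true;
      externalInterface = "eth0";
      internalInterfaces = [ "ve-+" ];
      forwardPorts = [
        { sourcePort = 8022; destination = "192.168.100.2:22"; proto = "tcp"; }
      ];
    };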
diff --git a/nixos/modules/services/networking/networkmanager.nix b/nixos/modules/services/networking/networkmanager.nix
index f4c4adcaaeb8..816234506593 100644
--- a/nixos/modules/services/networking/networkmanager.nix
+++ b/nixos/modules/services/networking/networkmanager.nix
@@ -38,6 +38,8 @@ let
 
     [device]
     wifi.scan-rand-mac-address=${if cfg.wifi.scanRandMacAddress then "yes" else "no"}
+
+    ${cfg.extraConfig}
   '';
 
   /*
@@ -120,6 +122,14 @@ in {
         '';
       };
 
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Configuration appended to the generated NetworkManager.conf.
+        '';
+      };
+
       unmanaged = mkOption {
         type = types.listOf types.string;
         default = [];
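
Example use of the new extraConfig option, appended verbatim to the generated NetworkManager.conf; the [connection] keys are ordinary NetworkManager settings, not options of this module:

    networking.networkmanager = {
      enable = true;
      extraConfig = ''
        [connection]
        wifi.powersave = 2
      '';
    };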
diff --git a/nixos/modules/services/networking/openntpd.nix b/nixos/modules/services/networking/openntpd.nix
index 4bb9da54fe09..241038ca12ed 100644
--- a/nixos/modules/services/networking/openntpd.nix
+++ b/nixos/modules/services/networking/openntpd.nix
@@ -7,7 +7,7 @@ let
 
   package = pkgs.openntpd_nixos;
 
-  cfgFile = pkgs.writeText "openntpd.conf" ''
+  configFile = ''
     ${concatStringsSep "\n" (map (s: "server ${s}") cfg.servers)}
     ${cfg.extraConfig}
   '';
@@ -31,8 +31,8 @@ in
       type = with types; lines;
       default = "";
       example = ''
-        listen on 127.0.0.1 
-        listen on ::1 
+        listen on 127.0.0.1
+        listen on ::1
       '';
       description = ''
         Additional text appended to <filename>openntpd.conf</filename>.
@@ -57,6 +57,8 @@ in
     # Add ntpctl to the environment for status checking
     environment.systemPackages = [ package ];
 
+    environment.etc."ntpd.conf".text = configFile;
+
     users.extraUsers = singleton {
       name = "ntp";
       uid = config.ids.uids.ntp;
@@ -71,7 +73,7 @@ in
       before = [ "time-sync.target" ];
       after = [ "dnsmasq.service" "bind.service" "network-online.target" ];
       serviceConfig = {
-        ExecStart = "${package}/sbin/ntpd -f ${cfgFile} -p ${pidFile} ${cfg.extraOptions}";
+        ExecStart = "${package}/sbin/ntpd -p ${pidFile} ${cfg.extraOptions}";
         Type = "forking";
         PIDFile = pidFile;
       };
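
Since the configuration now lands in /etc/ntpd.conf (so ntpd runs without -f), a typical setup would look roughly like this; services.openntpd.enable is assumed from this module and the server names are placeholders:

    services.openntpd = {
      enable = true;
      servers = [ "0.nixos.pool.ntp.org" "1.nixos.pool.ntp.org" ];
      extraConfig = ''
        listen on 127.0.0.1
      '';
    };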
diff --git a/nixos/modules/services/networking/owamp.nix b/nixos/modules/services/networking/owamp.nix
new file mode 100644
index 000000000000..a0d3e70d8e57
--- /dev/null
+++ b/nixos/modules/services/networking/owamp.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.owamp;
+in
+{
+
+  ###### interface
+
+  options = {
+    services.owamp.enable = mkEnableOption ''OWAMP server'';
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    users.extraUsers = singleton {
+      name = "owamp";
+      group = "owamp";
+      description = "Owamp daemon";
+    };
+
+    users.extraGroups = singleton {
+      name = "owamp";
+    };
+
+    systemd.services.owamp = {
+      description = "Owamp server";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart="${pkgs.owamp}/bin/owampd -R /run/owamp -d /run/owamp -v -Z ";
+        PrivateTmp = true;
+        Restart = "always";
+        Type="simple";
+        User = "owamp";
+        Group = "owamp";
+        RuntimeDirectory = "owamp";
+        StateDirectory = "owamp";
+        AmbientCapabilities = "cap_net_bind_service";
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/networking/ssh/sshd.nix b/nixos/modules/services/networking/ssh/sshd.nix
index aab1203086ce..961e72b2b810 100644
--- a/nixos/modules/services/networking/ssh/sshd.nix
+++ b/nixos/modules/services/networking/ssh/sshd.nix
@@ -272,6 +272,31 @@ in
         '';
       };
 
+      logLevel = mkOption {
+        type = types.enum [ "QUIET" "FATAL" "ERROR" "INFO" "VERBOSE" "DEBUG" "DEBUG1" "DEBUG2" "DEBUG3" ];
+        default = "VERBOSE";
+        description = ''
+          Gives the verbosity level that is used when logging messages from sshd(8). The possible values are:
+          QUIET, FATAL, ERROR, INFO, VERBOSE, DEBUG, DEBUG1, DEBUG2, and DEBUG3. The default is VERBOSE. DEBUG and DEBUG1
+          are equivalent. DEBUG2 and DEBUG3 each specify higher levels of debugging output. Logging with a DEBUG level
+          violates the privacy of users and is not recommended.
+
+          LogLevel VERBOSE logs user's key fingerprint on login.
+          Needed to have a clear audit track of which key was used to log in.
+        '';
+      };
+
+      useDns = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Specifies whether sshd(8) should look up the remote host name, and check that the resolved host name for
+          the remote IP address maps back to the very same IP address.
+          If this option is set to no (the default), only addresses and not host names may be used in
+          ~/.ssh/authorized_keys from and sshd_config Match Host directives.
+        '';
+      };
+
       extraConfig = mkOption {
         type = types.lines;
         default = "";
@@ -309,7 +334,9 @@ in
     services.openssh.moduliFile = mkDefault "${cfgc.package}/etc/ssh/moduli";
 
     environment.etc = authKeysFiles //
-      { "ssh/moduli".source = cfg.moduliFile; };
+      { "ssh/moduli".source = cfg.moduliFile;
+        "ssh/sshd_config".text = cfg.extraConfig;
+      };
 
     systemd =
       let
@@ -340,7 +367,7 @@ in
               { ExecStart =
                   (optionalString cfg.startWhenNeeded "-") +
                   "${cfgc.package}/bin/sshd " + (optionalString cfg.startWhenNeeded "-i ") +
-                  "-f ${pkgs.writeText "sshd_config" cfg.extraConfig}";
+                  "-f /etc/ssh/sshd_config";
                 KillMode = "process";
               } // (if cfg.startWhenNeeded then {
                 StandardInput = "socket";
@@ -426,9 +453,14 @@ in
         Ciphers ${concatStringsSep "," cfg.ciphers}
         MACs ${concatStringsSep "," cfg.macs}
 
-        # LogLevel VERBOSE logs user's key fingerprint on login.
-        # Needed to have a clear audit track of which key was used to log in.
-        LogLevel VERBOSE
+        LogLevel ${cfg.logLevel}
+
+        ${if cfg.useDns then ''
+          UseDNS yes
+        '' else ''
+          UseDNS no
+        ''}
+
       '';
 
     assertions = [{ assertion = if cfg.forwardX11 then cfgc.setXAuthLocation else true;
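
The two new options map directly to the LogLevel and UseDNS keywords emitted into sshd_config above; for example (services.openssh.enable assumed from this module):

    services.openssh = {
      enable = true;
      logLevel = "INFO";  # VERBOSE stays the default and records key fingerprints on login
      useDns = false;     # keep reverse-DNS checks off (the default)
    };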
diff --git a/nixos/modules/services/networking/sslh.nix b/nixos/modules/services/networking/sslh.nix
index e3d65c49fbf2..0222e8ce8b58 100644
--- a/nixos/modules/services/networking/sslh.nix
+++ b/nixos/modules/services/networking/sslh.nix
@@ -4,15 +4,14 @@ with lib;
 
 let
   cfg = config.services.sslh;
+  user = "sslh";
   configFile = pkgs.writeText "sslh.conf" ''
     verbose: ${boolToString cfg.verbose};
     foreground: true;
     inetd: false;
     numeric: false;
-    transparent: false;
+    transparent: ${boolToString cfg.transparent};
     timeout: "${toString cfg.timeout}";
-    user: "nobody";
-    pidfile: "${cfg.pidfile}";
 
     listen:
     (
@@ -50,16 +49,16 @@ in
         description = "Timeout in seconds.";
       };
 
-      pidfile = mkOption {
-        type = types.path;
-        default = "/run/sslh.pid";
-        description = "PID file path for sslh daemon.";
+      transparent = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether the services behind sslh (Apache, sshd and so on) will see the external IP and ports as if the external world connected directly to them.";
       };
 
       listenAddress = mkOption {
         type = types.str;
-        default = config.networking.hostName;
-        description = "Listening hostname.";
+        default = "0.0.0.0";
+        description = "Listening address or hostname.";
       };
 
       port = mkOption {
@@ -76,14 +75,91 @@ in
     };
   };
 
-  config = mkIf cfg.enable {
-    systemd.services.sslh = {
-      description = "Applicative Protocol Multiplexer (e.g. share SSH and HTTPS on the same port)";
-      after = [ "network.target" ];
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig.ExecStart = "${pkgs.sslh}/bin/sslh -F${configFile}";
-      serviceConfig.KillMode = "process";
-      serviceConfig.PIDFile = "${cfg.pidfile}";
-    };
-  };
+  config = mkMerge [
+    (mkIf cfg.enable {
+      users.users.${user} = {
+        description = "sslh daemon user";
+        isSystemUser = true;
+      };
+
+      systemd.services.sslh = {
+        description = "Applicative Protocol Multiplexer (e.g. share SSH and HTTPS on the same port)";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = {
+          User                 = user;
+          Group                = "nogroup";
+          PermissionsStartOnly = true;
+          Restart              = "always";
+          RestartSec           = "1s";
+          ExecStart            = "${pkgs.sslh}/bin/sslh -F${configFile}";
+          KillMode             = "process";
+          AmbientCapabilities  = "CAP_NET_BIND_SERVICE CAP_NET_ADMIN CAP_SETGID CAP_SETUID";
+          PrivateTmp           = true;
+          PrivateDevices       = true;
+          ProtectSystem        = "full";
+          ProtectHome          = true;
+        };
+      };
+    })
+
+    # code from https://github.com/yrutschle/sslh#transparent-proxy-support
+    # the only difference is using iptables mark 0x2 instead of 0x1 to avoid conflicts with nixos/nat module
+    (mkIf (cfg.enable && cfg.transparent) {
+      # Set route_localnet = 1 on all interfaces so that ssl can use "localhost" as destination
+      boot.kernel.sysctl."net.ipv4.conf.default.route_localnet" = 1;
+      boot.kernel.sysctl."net.ipv4.conf.all.route_localnet"     = 1;
+
+      systemd.services.sslh = let
+        iptablesCommands = [
+          # DROP martian packets as they would have been if route_localnet was zero
+          # Note: packets not leaving the server aren't affected by this, thus sslh will still work
+          { table = "raw";    command = "PREROUTING  ! -i lo -d 127.0.0.0/8 -j DROP"; }
+          { table = "mangle"; command = "POSTROUTING ! -o lo -s 127.0.0.0/8 -j DROP"; }
+          # Mark all connections made by ssl for special treatment (here sslh is run as user ${user})
+          { table = "nat";    command = "OUTPUT -m owner --uid-owner ${user} -p tcp --tcp-flags FIN,SYN,RST,ACK SYN -j CONNMARK --set-xmark 0x02/0x0f"; }
+          # Outgoing packets that should go to sslh instead have to be rerouted, so mark them accordingly (copying over the connection mark)
+          { table = "mangle"; command = "OUTPUT ! -o lo -p tcp -m connmark --mark 0x02/0x0f -j CONNMARK --restore-mark --mask 0x0f"; }
+        ];
+        ip6tablesCommands = [
+          { table = "raw";    command = "PREROUTING  ! -i lo -d ::1/128     -j DROP"; }
+          { table = "mangle"; command = "POSTROUTING ! -o lo -s ::1/128     -j DROP"; }
+          { table = "nat";    command = "OUTPUT -m owner --uid-owner ${user} -p tcp --tcp-flags FIN,SYN,RST,ACK SYN -j CONNMARK --set-xmark 0x02/0x0f"; }
+          { table = "mangle"; command = "OUTPUT ! -o lo -p tcp -m connmark --mark 0x02/0x0f -j CONNMARK --restore-mark --mask 0x0f"; }
+        ];
+      in {
+        path = [ pkgs.iptables pkgs.iproute pkgs.procps ];
+
+        preStart = ''
+          # Cleanup old iptables entries which might be still there
+          ${concatMapStringsSep "\n" ({table, command}: "while iptables -w -t ${table} -D ${command} 2>/dev/null; do echo; done") iptablesCommands}
+          ${concatMapStringsSep "\n" ({table, command}:       "iptables -w -t ${table} -A ${command}"                           ) iptablesCommands}
+
+          # Configure routing for those marked packets
+          ip rule  add fwmark 0x2 lookup 100
+          ip route add local 0.0.0.0/0 dev lo table 100
+
+        '' + optionalString config.networking.enableIPv6 ''
+          ${concatMapStringsSep "\n" ({table, command}: "while ip6tables -w -t ${table} -D ${command} 2>/dev/null; do echo; done") ip6tablesCommands}
+          ${concatMapStringsSep "\n" ({table, command}:       "ip6tables -w -t ${table} -A ${command}"                           ) ip6tablesCommands}
+
+          ip -6 rule  add fwmark 0x2 lookup 100
+          ip -6 route add local ::/0 dev lo table 100
+        '';
+
+        postStop = ''
+          ${concatMapStringsSep "\n" ({table, command}: "iptables -w -t ${table} -D ${command}") iptablesCommands}
+
+          ip rule  del fwmark 0x2 lookup 100
+          ip route del local 0.0.0.0/0 dev lo table 100
+        '' + optionalString config.networking.enableIPv6 ''
+          ${concatMapStringsSep "\n" ({table, command}: "ip6tables -w -t ${table} -D ${command}") ip6tablesCommands}
+
+          ip -6 rule  del fwmark 0x2 lookup 100
+          ip -6 route del local ::/0 dev lo table 100
+        '';
+      };
+    })
+  ];
 }
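
A sketch of the new transparent mode; setting it installs the iptables/ip rule plumbing from the second mkIf block so backends see the real client address (the port value is a placeholder):

    services.sslh = {
      enable = true;
      transparent = true;
      listenAddress = "0.0.0.0";
      port = 443;
    };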
diff --git a/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix b/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix
index ad211f41eef0..b16d299917fe 100644
--- a/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix
+++ b/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix
@@ -938,9 +938,12 @@ in {
         protection.
       '';
 
-      hw_offload = mkYesNoParam no ''
+      hw_offload = mkEnumParam ["yes" "no" "auto"] "no" ''
         Enable hardware offload for this CHILD_SA, if supported by the IPsec
-        implementation.
+        implementation. The value <literal>yes</literal> enforces offloading
+        and the installation will fail if it's not supported by either kernel or
+        device. The value <literal>auto</literal> enables offloading, if it's
+        supported, but the installation does not fail otherwise.
       '';
 
       start_action = mkEnumParam ["none" "trap" "start"] "none" ''
diff --git a/nixos/modules/services/networking/tinc.nix b/nixos/modules/services/networking/tinc.nix
index e3c9b5282b8c..77bcdae80191 100644
--- a/nixos/modules/services/networking/tinc.nix
+++ b/nixos/modules/services/networking/tinc.nix
@@ -163,12 +163,7 @@ in
         wantedBy = [ "multi-user.target" ];
         after = [ "network.target" ];
         path = [ data.package ];
-        restartTriggers =
-          let
-            drvlist = [ config.environment.etc."tinc/${network}/tinc.conf".source ]
-                        ++ mapAttrsToList (host: _: config.environment.etc."tinc/${network}/hosts/${host}".source) data.hosts;
-          in # drvlist might be too long to be used directly
-            [ (builtins.hashString "sha256" (concatMapStrings (d: d.outPath) drvlist)) ];
+        restartTriggers = [ config.environment.etc."tinc/${network}/tinc.conf".source ];
         serviceConfig = {
           Type = "simple";
           Restart = "always";
@@ -207,7 +202,8 @@ in
           ${concatStringsSep "\n" (mapAttrsToList (network: data:
             optionalString (versionAtLeast data.package.version "1.1pre") ''
               makeWrapper ${data.package}/bin/tinc "$out/bin/tinc.${network}" \
-                --add-flags "--pidfile=/run/tinc.${network}.pid"
+                --add-flags "--pidfile=/run/tinc.${network}.pid" \
+                --add-flags "--config=/etc/tinc/${network}"
             '') cfg.networks)}
         '';
       };
diff --git a/nixos/modules/services/networking/unbound.nix b/nixos/modules/services/networking/unbound.nix
index f069a9883a7f..07936faaa133 100644
--- a/nixos/modules/services/networking/unbound.nix
+++ b/nixos/modules/services/networking/unbound.nix
@@ -60,7 +60,7 @@ in
       };
 
       interfaces = mkOption {
-        default = [ "127.0.0.1" "::1" ];
+        default = [ "127.0.0.1" ] ++ optional config.networking.enableIPv6 "::1";
         type = types.listOf types.str;
         description = "What addresses the server should listen on.";
       };
@@ -112,8 +112,8 @@ in
         mkdir -m 0755 -p ${stateDir}/dev/
         cp ${confFile} ${stateDir}/unbound.conf
         ${optionalString cfg.enableRootTrustAnchor ''
-        ${pkgs.unbound}/bin/unbound-anchor -a ${rootTrustAnchorFile} || echo "Root anchor updated!"
-        chown unbound ${stateDir} ${rootTrustAnchorFile}
+          ${pkgs.unbound}/bin/unbound-anchor -a ${rootTrustAnchorFile} || echo "Root anchor updated!"
+          chown unbound ${stateDir} ${rootTrustAnchorFile}
         ''}
         touch ${stateDir}/dev/random
         ${pkgs.utillinux}/bin/mount --bind -n /dev/urandom ${stateDir}/dev/random
@@ -126,6 +126,8 @@ in
         ProtectSystem = true;
         ProtectHome = true;
         PrivateDevices = true;
+        Restart = "always";
+        RestartSec = "5s";
       };
     };
 
diff --git a/nixos/modules/services/networking/xrdp.nix b/nixos/modules/services/networking/xrdp.nix
index bf23c6ae6192..0e882873b4ba 100644
--- a/nixos/modules/services/networking/xrdp.nix
+++ b/nixos/modules/services/networking/xrdp.nix
@@ -97,6 +97,7 @@ in
     # xrdp can run X11 program even if "services.xserver.enable = false"
     environment.pathsToLink =
       [ "/etc/xdg" "/share/xdg" "/share/applications" "/share/icons" "/share/pixmaps" ];
+    fonts.enableDefaultFonts = mkDefault true;
 
     systemd = {
       services.xrdp = {
diff --git a/nixos/modules/services/scheduling/fcron.nix b/nixos/modules/services/scheduling/fcron.nix
index e3b6b638f5a7..0ea41f3c3985 100644
--- a/nixos/modules/services/scheduling/fcron.nix
+++ b/nixos/modules/services/scheduling/fcron.nix
@@ -128,6 +128,7 @@ in
         owner = "fcron";
         group = "fcron";
         setgid = true;
+        setuid = true;
       };
       fcrondyn = {
         source = "${pkgs.fcron}/bin/fcrondyn";
diff --git a/nixos/modules/services/security/munge.nix b/nixos/modules/services/security/munge.nix
index 919c2c2b0e15..5bca15833544 100644
--- a/nixos/modules/services/security/munge.nix
+++ b/nixos/modules/services/security/munge.nix
@@ -35,7 +35,15 @@ in
 
     environment.systemPackages = [ pkgs.munge ];
 
-    systemd.services.munged = { 
+    users.users.munge = {
+      description   = "Munge daemon user";
+      isSystemUser  = true;
+      group         = "munge";
+    };
+
+    users.groups.munge = {};
+
+    systemd.services.munged = {
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
 
@@ -44,14 +52,20 @@ in
       preStart = ''
         chmod 0700 ${cfg.password}
         mkdir -p /var/lib/munge -m 0711
+        chown -R munge:munge /var/lib/munge
         mkdir -p /var/log/munge -m 0700
+        chown -R munge:munge /var/log/munge
         mkdir -p /run/munge -m 0755
+        chown -R munge:munge /run/munge
       '';
 
       serviceConfig = {
         ExecStart = "${pkgs.munge}/bin/munged --syslog --key-file ${cfg.password}";
         PIDFile = "/run/munge/munged.pid";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        PermissionsStartOnly = "true";
+        User = "munge";
+        Group = "munge";
       };
 
     };
diff --git a/nixos/modules/services/security/oauth2_proxy.nix b/nixos/modules/services/security/oauth2_proxy.nix
index 433d97c2a7d7..96d78630e6d1 100644
--- a/nixos/modules/services/security/oauth2_proxy.nix
+++ b/nixos/modules/services/security/oauth2_proxy.nix
@@ -72,6 +72,7 @@ let
 
   mapConfig = key: attr:
   if (!isNull attr && attr != []) then (
+    if isDerivation attr then mapConfig key (toString attr) else
     if (builtins.typeOf attr) == "set" then concatStringsSep " "
       (mapAttrsToList (name: value: mapConfig (key + "-" + name) value) attr) else
     if (builtins.typeOf attr) == "list" then concatMapStringsSep " " (mapConfig key) attr else
diff --git a/nixos/modules/services/security/tor.nix b/nixos/modules/services/security/tor.nix
index 806252f49b8d..15200c49d70a 100644
--- a/nixos/modules/services/security/tor.nix
+++ b/nixos/modules/services/security/tor.nix
@@ -39,7 +39,7 @@ let
     ''}
 
     ${optint "ControlPort" cfg.controlPort}
-    ${optionalString cfg.controlSocket.enable "ControlSocket ${torRunDirectory}/control GroupWritable RelaxDirModeCheck"}
+    ${optionalString cfg.controlSocket.enable "ControlPort unix:${torRunDirectory}/control GroupWritable RelaxDirModeCheck"}
   ''
   # Client connection config
   + optionalString cfg.client.enable ''
@@ -360,7 +360,7 @@ in
 
                 <important>
                   <para>
-                    WARNING: THE FOLLOWING PARAGRAPH IS NOT LEGAL ADVISE.
+                    WARNING: THE FOLLOWING PARAGRAPH IS NOT LEGAL ADVICE.
                     Consult with your lawyer when in doubt.
                   </para>
 
@@ -695,19 +695,38 @@ in
         uid         = config.ids.uids.tor;
       };
 
+    # We have to do this instead of using RuntimeDirectory option in
+    # the service below because systemd has no way to set owners of
+    # RuntimeDirectory and putting this into the service below
+    # requires that service to relax its sandbox since this needs
+    # writable /run
+    systemd.services.tor-init =
+      { description = "Tor Daemon Init";
+        wantedBy = [ "tor.service" ];
+        after = [ "local-fs.target" ];
+        script = ''
+          install -m 0700 -o tor -g tor -d ${torDirectory} ${torDirectory}/onion
+          install -m 0750 -o tor -g tor -d ${torRunDirectory}
+        '';
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+      };
+
     systemd.services.tor =
       { description = "Tor Daemon";
         path = [ pkgs.tor ];
 
         wantedBy = [ "multi-user.target" ];
-        after    = [ "network.target" ];
+        after    = [ "tor-init.service" "network.target" ];
         restartTriggers = [ torRcFile ];
 
         serviceConfig =
           { Type         = "simple";
             # Translated from the upstream contrib/dist/tor.service.in
             ExecStartPre = "${pkgs.tor}/bin/tor -f ${torRcFile} --verify-config";
-            ExecStart    = "${pkgs.tor}/bin/tor -f ${torRcFile} --RunAsDaemon 0";
+            ExecStart    = "${pkgs.tor}/bin/tor -f ${torRcFile}";
             ExecReload   = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
             KillSignal   = "SIGINT";
             TimeoutSec   = 30;
@@ -715,20 +734,18 @@ in
             LimitNOFILE  = 32768;
 
             # Hardening
-            # Note: DevicePolicy is set to 'closed', although the
-            # minimal permissions are really:
-            #   DeviceAllow /dev/null rw
-            #   DeviceAllow /dev/urandom r
-            # .. but we can't specify DeviceAllow multiple times. 'closed'
-            # is close enough.
-            RuntimeDirectory        = "tor";
-            StateDirectory          = [ "tor" "tor/onion" ];
-            PrivateTmp              = "yes";
-            DevicePolicy            = "closed";
-            InaccessibleDirectories = "/home";
-            ReadOnlyDirectories     = "/";
-            ReadWriteDirectories    = [torDirectory torRunDirectory];
+            # this seems to unshare /run despite what systemd.exec(5) says
+            PrivateTmp              = mkIf (!cfg.controlSocket.enable) "yes";
+            PrivateDevices          = "yes";
+            ProtectHome             = "yes";
+            ProtectSystem           = "strict";
+            InaccessiblePaths       = "/home";
+            ReadOnlyPaths           = "/";
+            ReadWritePaths          = [ torDirectory torRunDirectory ];
             NoNewPrivileges         = "yes";
+
+            # tor.service.in has this, but with this line tor fails to spawn a namespace when using hidden services
+            #CapabilityBoundingSet   = "CAP_SETUID CAP_SETGID CAP_NET_BIND_SERVICE";
           };
       };
 
diff --git a/nixos/modules/services/web-apps/atlassian/crowd.nix b/nixos/modules/services/web-apps/atlassian/crowd.nix
index 0ac941b6ec99..778e4afa1e0b 100644
--- a/nixos/modules/services/web-apps/atlassian/crowd.nix
+++ b/nixos/modules/services/web-apps/atlassian/crowd.nix
@@ -126,12 +126,13 @@ in
       };
 
       preStart = ''
-        mkdir -p ${cfg.home}/{logs,work,database}
+        rm -rf ${cfg.home}/work
+        mkdir -p ${cfg.home}/{logs,database,work}
 
         mkdir -p /run/atlassian-crowd
         ln -sf ${cfg.home}/{database,work,server.xml} /run/atlassian-crowd
 
-        chown -R ${cfg.user} ${cfg.home}
+        chown -R ${cfg.user}:${cfg.group} ${cfg.home}
 
         sed -e 's,port="8095",port="${toString cfg.listenPort}" address="${cfg.listenAddress}",' \
         '' + (lib.optionalString cfg.proxy.enable ''
diff --git a/nixos/modules/services/web-apps/mattermost.nix b/nixos/modules/services/web-apps/mattermost.nix
index be74a2b1955b..96792c47cd24 100644
--- a/nixos/modules/services/web-apps/mattermost.nix
+++ b/nixos/modules/services/web-apps/mattermost.nix
@@ -25,7 +25,7 @@ in
 {
   options = {
     services.mattermost = {
-      enable = mkEnableOption "Mattermost chat platform";
+      enable = mkEnableOption "Mattermost chat server";
 
       statePath = mkOption {
         type = types.str;
@@ -167,7 +167,7 @@ in
       '';
 
       systemd.services.mattermost = {
-        description = "Mattermost chat platform service";
+        description = "Mattermost chat service";
         wantedBy = [ "multi-user.target" ];
         after = [ "network.target" "postgresql.service" ];
 
@@ -201,13 +201,13 @@ in
           PermissionsStartOnly = true;
           User = cfg.user;
           Group = cfg.group;
-          ExecStart = "${pkgs.mattermost}/bin/mattermost-platform";
+          ExecStart = "${pkgs.mattermost}/bin/mattermost";
           WorkingDirectory = "${cfg.statePath}";
-          JoinsNamespaceOf = mkIf cfg.localDatabaseCreate "postgresql.service";
           Restart = "always";
           RestartSec = "10";
           LimitNOFILE = "49152";
         };
+        unitConfig.JoinsNamespaceOf = mkIf cfg.localDatabaseCreate "postgresql.service";
       };
     })
     (mkIf cfg.matterircd.enable {
diff --git a/nixos/modules/services/web-apps/nexus.nix b/nixos/modules/services/web-apps/nexus.nix
index d5bd0f12febb..b0eaee6040e3 100644
--- a/nixos/modules/services/web-apps/nexus.nix
+++ b/nixos/modules/services/web-apps/nexus.nix
@@ -13,6 +13,12 @@ in
     services.nexus = {
       enable = mkEnableOption "Sonatype Nexus3 OSS service";
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.nexus;
+        description = "Package which runs Nexus3";
+      };
+
       user = mkOption {
         type = types.str;
         default = "nexus";
@@ -55,10 +61,10 @@ in
           -XX:LogFile=${cfg.home}/nexus3/log/jvm.log
           -XX:-OmitStackTraceInFastThrow
           -Djava.net.preferIPv4Stack=true
-          -Dkaraf.home=${pkgs.nexus}
-          -Dkaraf.base=${pkgs.nexus}
-          -Dkaraf.etc=${pkgs.nexus}/etc/karaf
-          -Djava.util.logging.config.file=${pkgs.nexus}/etc/karaf/java.util.logging.properties
+          -Dkaraf.home=${cfg.package}
+          -Dkaraf.base=${cfg.package}
+          -Dkaraf.etc=${cfg.package}/etc/karaf
+          -Djava.util.logging.config.file=${cfg.package}/etc/karaf/java.util.logging.properties
           -Dkaraf.data=${cfg.home}/nexus3
           -Djava.io.tmpdir=${cfg.home}/nexus3/tmp
           -Dkaraf.startLocalConsole=false
@@ -112,7 +118,7 @@ in
         fi
       '';
 
-      script = "${pkgs.nexus}/bin/nexus run";
+      script = "${cfg.package}/bin/nexus run";
 
       serviceConfig = {
         User = cfg.user;
diff --git a/nixos/modules/services/web-apps/tt-rss.nix b/nixos/modules/services/web-apps/tt-rss.nix
index 610c6463a5eb..1646ee5964fb 100644
--- a/nixos/modules/services/web-apps/tt-rss.nix
+++ b/nixos/modules/services/web-apps/tt-rss.nix
@@ -76,6 +76,8 @@ let
       define('SMTP_FROM_NAME', '${escape ["'" "\\"] cfg.email.fromName}');
       define('SMTP_FROM_ADDRESS', '${escape ["'" "\\"] cfg.email.fromAddress}');
       define('DIGEST_SUBJECT', '${escape ["'" "\\"] cfg.email.digestSubject}');
+
+      ${cfg.extraConfig}
   '';
 
  in {
@@ -431,6 +433,26 @@ let
         '';
       };
 
+      pluginPackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = ''
+          List of plugins to install. The list elements are expected to
+          be derivations. All elements in this derivation are automatically
+          copied to the <literal>plugins.local</literal> directory.
+        '';
+      };
+
+      themePackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = ''
+          List of themes to install. The list elements are expected to
+          be derivations. All elements in this derivation are automatically
+          copied to the <literal>themes.local</literal> directory.
+        '';
+      };
+
       logDestination = mkOption {
         type = types.enum ["" "sql" "syslog"];
         default = "sql";
@@ -441,6 +463,14 @@ let
           error.log).
         '';
       };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Additional lines to append to <literal>config.php</literal>.
+        '';
+      };
     };
   };
 
@@ -517,6 +547,16 @@ let
           rm -rf "${cfg.root}/*"
           mkdir -m 755 -p "${cfg.root}"
           cp -r "${pkgs.tt-rss}/"* "${cfg.root}"
+          ${optionalString (cfg.pluginPackages != []) ''
+            for plugin in ${concatStringsSep " " cfg.pluginPackages}; do
+              cp -r "$plugin"/* "${cfg.root}/plugins.local/"
+            done
+          ''}
+          ${optionalString (cfg.themePackages != []) ''
+            for theme in ${concatStringsSep " " cfg.themePackages}; do
+              cp -r "$theme"/* "${cfg.root}/themes.local/"
+            done
+          ''}
           ln -sf "${tt-rss-config}" "${cfg.root}/config.php"
           chown -R "${cfg.user}" "${cfg.root}"
           chmod -R 755 "${cfg.root}"
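
A sketch of the new plugin/theme/extraConfig options; the two package names are hypothetical derivations whose contents get copied into plugins.local and themes.local by the preStart script above:

    services.tt-rss = {
      enable = true;                               # assumed from the rest of this module
      pluginPackages = [ pkgs.my-tt-rss-plugin ];  # hypothetical plugin derivation
      themePackages  = [ pkgs.my-tt-rss-theme ];   # hypothetical theme derivation
      extraConfig = ''
        define('PLUGINS', 'auth_internal, note');
      '';
    };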
diff --git a/nixos/modules/services/web-apps/virtlyst.nix b/nixos/modules/services/web-apps/virtlyst.nix
new file mode 100644
index 000000000000..2fc67435ce82
--- /dev/null
+++ b/nixos/modules/services/web-apps/virtlyst.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.virtlyst;
+  stateDir = "/var/lib/virtlyst";
+
+  ini = pkgs.writeText "virtlyst-config.ini" ''
+    [wsgi]
+    master = true
+    threads = auto
+    http-socket = ${cfg.httpSocket}
+    application = ${pkgs.virtlyst}/lib/libVirtlyst.so
+    chdir2 = ${stateDir}
+    static-map = /static=${pkgs.virtlyst}/root/static
+
+    [Cutelyst]
+    production = true
+    DatabasePath = virtlyst.sqlite
+    TemplatePath = ${pkgs.virtlyst}/root/src
+
+    [Rules]
+    cutelyst.* = true
+    virtlyst.* = true
+  '';
+
+in
+
+{
+
+  options.services.virtlyst = {
+    enable = mkEnableOption "Virtlyst libvirt web interface";
+
+    adminPassword = mkOption {
+      type = types.str;
+      description = ''
+        Initial admin password with which the database will be seeded.
+      '';
+    };
+
+    httpSocket = mkOption {
+      type = types.str;
+      default = "localhost:3000";
+      description = ''
+        IP and/or port to which to bind the http socket.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.extraUsers.virtlyst = {
+      home = stateDir;
+      createHome = true;
+      group = mkIf config.virtualisation.libvirtd.enable "libvirtd";
+    };
+
+    systemd.services.virtlyst = {
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        VIRTLYST_ADMIN_PASSWORD = cfg.adminPassword;
+      };
+      serviceConfig = {
+        ExecStart = "${pkgs.cutelyst}/bin/cutelyst-wsgi2 --ini ${ini}";
+        User = "virtlyst";
+        WorkingDirectory = stateDir;
+      };
+    };
+  };
+
+}
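
Minimal usage sketch for the new Virtlyst module; the admin password is a placeholder and, as written, ends up in the unit's environment:

    services.virtlyst = {
      enable = true;
      adminPassword = "changeme";     # placeholder; seeds the initial admin account
      httpSocket = "127.0.0.1:3000";
    };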
diff --git a/nixos/modules/services/web-servers/meguca.nix b/nixos/modules/services/web-servers/meguca.nix
new file mode 100644
index 000000000000..8ae86c67a29f
--- /dev/null
+++ b/nixos/modules/services/web-servers/meguca.nix
@@ -0,0 +1,158 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.meguca;
+  postgres = config.services.postgresql;
+in
+{
+  options.services.meguca = {
+    enable = mkEnableOption "meguca";
+
+    baseDir = mkOption {
+      type = types.path;
+      default = "/run/meguca";
+      description = "Location where meguca stores its database and links.";
+    };
+
+    password = mkOption {
+      type = types.str;
+      default = "meguca";
+      description = "Password for the meguca database.";
+    };
+
+    passwordFile = mkOption {
+      type = types.path;
+      default = "/run/keys/meguca-password-file";
+      description = "Password file for the meguca database.";
+    };
+
+    reverseProxy = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Reverse proxy IP.";
+    };
+
+    sslCertificate = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Path to the SSL certificate.";
+    };
+
+    listenAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Listen on a specific IP address and port.";
+    };
+
+    cacheSize = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = "Cache size in MB.";
+    };
+
+    postgresArgs = mkOption {
+      type = types.str;
+      default = "user=meguca password=" + cfg.password + " dbname=meguca sslmode=disable";
+      description = "Postgresql connection arguments.";
+    };
+
+    postgresArgsFile = mkOption {
+      type = types.path;
+      default = "/run/keys/meguca-postgres-args";
+      description = "Postgresql connection arguments file.";
+    };
+
+    compressTraffic = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Compress all traffic with gzip.";
+    };
+
+    assumeReverseProxy = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Assume the server is behind a reverse proxy when resolving client IPs.";
+    };
+
+    httpsOnly = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Serve and listen only through HTTPS.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    security.sudo.enable = cfg.enable == true;
+    services.postgresql.enable = cfg.enable == true;
+
+    services.meguca.passwordFile = mkDefault (toString (pkgs.writeTextFile {
+      name = "meguca-password-file";
+      text = cfg.password;
+    }));
+
+    services.meguca.postgresArgsFile = mkDefault (toString (pkgs.writeTextFile {
+      name = "meguca-postgres-args";
+      text = cfg.postgresArgs;
+    }));
+
+    systemd.services.meguca = {
+      description = "meguca";
+      after = [ "network.target" "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        # Ensure folder exists and links are correct or create them
+        mkdir -p ${cfg.baseDir}
+        ln -sf ${pkgs.meguca}/share/meguca/www ${cfg.baseDir}
+
+        # Ensure the database is correct or create it
+        ${pkgs.sudo}/bin/sudo -u ${postgres.superUser} ${postgres.package}/bin/createuser \
+          -SDR meguca || true
+        ${pkgs.sudo}/bin/sudo -u ${postgres.superUser} ${postgres.package}/bin/psql \
+          -c "ALTER ROLE meguca WITH PASSWORD '$(cat ${cfg.passwordFile})';" || true
+        ${pkgs.sudo}/bin/sudo -u ${postgres.superUser} ${postgres.package}/bin/createdb \
+          -T template0 -E UTF8 -O meguca meguca || true
+      '';
+
+      script = ''
+        cd ${cfg.baseDir}
+
+        ${pkgs.meguca}/bin/meguca -d "$(cat ${cfg.postgresArgsFile})"\
+          ${optionalString (cfg.reverseProxy != null) " -R ${cfg.reverseProxy}"}\
+          ${optionalString (cfg.sslCertificate != null) " -S ${cfg.sslCertificate}"}\
+          ${optionalString (cfg.listenAddress != null) " -a ${cfg.listenAddress}"}\
+          ${optionalString (cfg.cacheSize != null) " -c ${toString cfg.cacheSize}"}\
+          ${optionalString (cfg.compressTraffic) " -g"}\
+          ${optionalString (cfg.assumeReverseProxy) " -r"}\
+          ${optionalString (cfg.httpsOnly) " -s"} start
+      '';
+
+      serviceConfig = {
+        PermissionsStartOnly = true;
+        Type = "forking";
+        User = "meguca";
+        Group = "meguca";
+        RuntimeDirectory = "meguca";
+        ExecStop = "${pkgs.meguca}/bin/meguca stop";
+      };
+    };
+
+    users = {
+      extraUsers.meguca = {
+        description = "meguca server service user";
+        home = cfg.baseDir;
+        createHome = true;
+        group = "meguca";
+        uid = config.ids.uids.meguca;
+      };
+
+      extraGroups.meguca = {
+        gid = config.ids.gids.meguca;
+        members = [ "meguca" ];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ chiiruno ];
+}
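
A configuration sketch for the new meguca module; all values are placeholders, and note that the generated passwordFile default puts the password into the Nix store:

    services.meguca = {
      enable = true;
      listenAddress = "127.0.0.1:8000";
      cacheSize = 256;        # MB
      compressTraffic = true;
    };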
diff --git a/nixos/modules/services/web-servers/minio.nix b/nixos/modules/services/web-servers/minio.nix
index 843f0d986877..7ead33483ea4 100644
--- a/nixos/modules/services/web-servers/minio.nix
+++ b/nixos/modules/services/web-servers/minio.nix
@@ -85,7 +85,7 @@ in
       '';
       serviceConfig = {
         PermissionsStartOnly = true;
-        ExecStart = "${cfg.package}/bin/minio server --address ${cfg.listenAddress} --config-dir=${cfg.configDir} ${cfg.dataDir}";
+        ExecStart = "${cfg.package}/bin/minio server --json --address ${cfg.listenAddress} --config-dir=${cfg.configDir} ${cfg.dataDir}";
         Type = "simple";
         User = "minio";
         Group = "minio";
diff --git a/nixos/modules/services/web-servers/tomcat.nix b/nixos/modules/services/web-servers/tomcat.nix
index aa94e0e976c9..bc713a08f18f 100644
--- a/nixos/modules/services/web-servers/tomcat.nix
+++ b/nixos/modules/services/web-servers/tomcat.nix
@@ -110,7 +110,7 @@ in
       webapps = mkOption {
         type = types.listOf types.package;
         default = [ tomcat.webapps ];
-        defaultText = "[ tomcat.webapps ]";
+        defaultText = "[ pkgs.tomcat85.webapps ]";
         description = "List containing WAR files or directories with WAR files which are web applications to be deployed on Tomcat";
       };
 
diff --git a/nixos/modules/services/web-servers/uwsgi.nix b/nixos/modules/services/web-servers/uwsgi.nix
index 14596bb3add0..356b896a6dc9 100644
--- a/nixos/modules/services/web-servers/uwsgi.nix
+++ b/nixos/modules/services/web-servers/uwsgi.nix
@@ -31,9 +31,7 @@ let
         inherit python;
       };
 
-      penv = python.buildEnv.override {
-        extraLibs = (c.pythonPackages or (self: [])) pythonPackages;
-      };
+      pythonEnv = python.withPackages (c.pythonPackages or (self: []));
 
       uwsgiCfg = {
         uwsgi =
@@ -42,7 +40,7 @@ let
               inherit plugins;
             } // removeAttrs c [ "type" "pythonPackages" ]
               // optionalAttrs (python != null) {
-                pythonpath = "${penv}/${python.sitePackages}";
+                pythonpath = "${pythonEnv}/${python.sitePackages}";
                 env =
                   # Argh, uwsgi expects list of key-values there instead of a dictionary.
                   let env' = c.env or [];
@@ -51,7 +49,7 @@ let
                            then substring (stringLength "PATH=") (stringLength x) x
                            else null;
                       oldPaths = filter (x: x != null) (map getPath env');
-                  in env' ++ [ "PATH=${optionalString (oldPaths != []) "${last oldPaths}:"}${penv}/bin" ];
+                  in env' ++ [ "PATH=${optionalString (oldPaths != []) "${last oldPaths}:"}${pythonEnv}/bin" ];
               }
           else if c.type == "emperor"
             then {
diff --git a/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix b/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix
new file mode 100644
index 000000000000..ba8151a60f20
--- /dev/null
+++ b/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix
@@ -0,0 +1,100 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  dmcfg = config.services.xserver.displayManager;
+  ldmcfg = dmcfg.lightdm;
+  cfg = ldmcfg.greeters.mini;
+
+  xgreeters = pkgs.linkFarm "lightdm-mini-greeter-xgreeters" [{
+    path = "${pkgs.lightdm-mini-greeter}/share/xgreeters/lightdm-mini-greeter.desktop";
+    name = "lightdm-mini-greeter.desktop";
+  }];
+
+  miniGreeterConf = pkgs.writeText "lightdm-mini-greeter.conf"
+    ''
+    [greeter]
+    user = ${cfg.user}
+    show-password-label = true
+    password-label-text = Password:
+    show-input-cursor = true
+
+    [greeter-hotkeys]
+    mod-key = meta
+    shutdown-key = s
+    restart-key = r
+    hibernate-key = h
+    suspend-key = u
+
+    [greeter-theme]
+    font = Sans
+    font-size = 1em
+    text-color = "#080800"
+    error-color = "#F8F8F0"
+    background-image = "${ldmcfg.background}"
+    background-color = "#1B1D1E"
+    window-color = "#F92672"
+    border-color = "#080800"
+    border-width = 2px
+    layout-space = 15
+    password-color = "#F8F8F0"
+    password-background-color = "#1B1D1E"
+
+    ${cfg.extraConfig}
+    '';
+
+in
+{
+  options = {
+
+    services.xserver.displayManager.lightdm.greeters.mini = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable lightdm-mini-greeter as the lightdm greeter.
+
+          Note that this greeter starts only the default X session.
+          You can configure the default X session by
+          <option>services.xserver.desktopManager.default</option> and
+          <option>services.xserver.windowManager.default</option>.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "root";
+        description = ''
+          The user to login as.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra configuration that should be put in the lightdm-mini-greeter.conf
+          configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+
+    services.xserver.displayManager.lightdm.greeters.gtk.enable = false;
+
+    services.xserver.displayManager.lightdm.greeter = mkDefault {
+      package = xgreeters;
+      name = "lightdm-mini-greeter";
+    };
+
+    environment.etc."lightdm/lightdm-mini-greeter.conf".source = miniGreeterConf;
+
+  };
+}
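
Enabling the new greeter could look like this, assuming the usual options from lightdm.nix; the user name is a placeholder:

    services.xserver = {
      enable = true;
      displayManager.lightdm = {
        enable = true;
        greeters.mini = {
          enable = true;
          user = "alice";  # placeholder; the account the greeter logs in as
        };
      };
    };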
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index 9d30155a7234..5beadacdfa93 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -72,6 +72,7 @@ in
   # preferred.
   imports = [
     ./lightdm-greeters/gtk.nix
+    ./lightdm-greeters/mini.nix
   ];
 
   options = {
diff --git a/nixos/modules/services/x11/display-managers/sddm.nix b/nixos/modules/services/x11/display-managers/sddm.nix
index 2d4cb8aa20a5..df782e82ed15 100644
--- a/nixos/modules/services/x11/display-managers/sddm.nix
+++ b/nixos/modules/services/x11/display-managers/sddm.nix
@@ -65,6 +65,10 @@ let
     XauthPath=${pkgs.xorg.xauth}/bin/xauth
     DisplayCommand=${Xsetup}
     DisplayStopCommand=${Xstop}
+    EnableHidpi=${if cfg.enableHidpi then "true" else "false"}
+
+    [Wayland]
+    EnableHidpi=${if cfg.enableHidpi then "true" else "false"}
 
     ${optionalString cfg.autoLogin.enable ''
     [Autologin]
@@ -95,6 +99,17 @@ in
         '';
       };
 
+      enableHidpi = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Whether to enable automatic HiDPI mode.
+          </para>
+          <para>
+          Versions up to 0.17 are broken so this only works from 0.18 onwards.
+        '';
+      };
+
       extraConfig = mkOption {
         type = types.lines;
         default = "";
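
Example use of the new option; per the description it only takes effect with SDDM 0.18 or newer:

    services.xserver.displayManager.sddm = {
      enable = true;
      enableHidpi = false;  # true is the default
    };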
diff --git a/nixos/modules/services/x11/window-managers/awesome.nix b/nixos/modules/services/x11/window-managers/awesome.nix
index 71eb02ec5954..089e9f769f0a 100644
--- a/nixos/modules/services/x11/window-managers/awesome.nix
+++ b/nixos/modules/services/x11/window-managers/awesome.nix
@@ -37,6 +37,11 @@ in
         apply = pkg: if pkg == null then pkgs.awesome else pkg;
       };
 
+      noArgb = mkOption {
+        default = false;
+        type = types.bool;
+        description = "Disable client transparency support, which can be greatly detrimental to performance in some setups";
+      };
     };
 
   };
@@ -50,7 +55,7 @@ in
       { name = "awesome";
         start =
           ''
-            ${awesome}/bin/awesome ${makeSearchPath cfg.luaModules} &
+            ${awesome}/bin/awesome ${lib.optionalString cfg.noArgb "--no-argb"} ${makeSearchPath cfg.luaModules} &
             waitPID=$!
           '';
       };
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 1404231f837e..3048cd02683f 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -244,6 +244,13 @@ in
           "ati_unfree" "amdgpu" "amdgpu-pro"
           "nv" "nvidia" "nvidiaLegacy340" "nvidiaLegacy304"
         ];
+        # TODO(@oxij): think how to easily add the rest, like those nvidia things
+        relatedPackages = concatLists
+          (mapAttrsToList (n: v:
+            optional (hasPrefix "xf86video" n) {
+              path  = [ "xorg" n ];
+              title = removePrefix "xf86video" n;
+            }) pkgs.xorg);
         description = ''
           The names of the video drivers the configuration
           supports. They will be tried in order until one that
diff --git a/nixos/modules/system/boot/initrd-network.nix b/nixos/modules/system/boot/initrd-network.nix
index 33862b0965cc..384ae909b701 100644
--- a/nixos/modules/system/boot/initrd-network.nix
+++ b/nixos/modules/system/boot/initrd-network.nix
@@ -12,6 +12,7 @@ let
       if [ "$1" = bound ]; then
         ip address add "$ip/$mask" dev "$interface"
         if [ -n "$router" ]; then
+          ip route add "$router" dev "$interface" # just in case if "$router" is not within "$ip/$mask" (e.g. Hetzner Cloud)
           ip route add default via "$router" dev "$interface"
         fi
         if [ -n "$dns" ]; then
diff --git a/nixos/modules/system/boot/loader/grub/grub.nix b/nixos/modules/system/boot/loader/grub/grub.nix
index e2cff1c1bd94..42da65857221 100644
--- a/nixos/modules/system/boot/loader/grub/grub.nix
+++ b/nixos/modules/system/boot/loader/grub/grub.nix
@@ -64,9 +64,10 @@ let
       )) + ":" + (makeSearchPathOutput "bin" "sbin" [
         pkgs.mdadm pkgs.utillinux
       ]);
-      font = if lib.last (lib.splitString "." cfg.font) == "pf2"
+      font = if cfg.font == null then ""
+        else (if lib.last (lib.splitString "." cfg.font) == "pf2"
              then cfg.font
-             else "${convertedFont}";
+             else "${convertedFont}");
     });
 
   bootDeviceCounters = fold (device: attr: attr // { "${device}" = (attr."${device}" or 0) + 1; }) {}
@@ -308,10 +309,22 @@ in
         type = types.nullOr types.path;
         example = literalExample "./my-background.png";
         description = ''
-          Background image used for GRUB.  It must be a 640x480,
+          Background image used for GRUB.
+          Set to <literal>null</literal> to run GRUB in text mode.
+
+          <note><para>
+          For GRUB 1:
+          It must be a 640x480,
           14-colour image in XPM format, optionally compressed with
-          <command>gzip</command> or <command>bzip2</command>.  Set to
-          <literal>null</literal> to run GRUB in text mode.
+          <command>gzip</command> or <command>bzip2</command>.
+          </para></note>
+
+          <note><para>
+          For GRUB 2:
+          The file must be a .png, .tga, .jpg, or .jpeg image. JPEG images must
+          not be progressive.
+          The image will be scaled if necessary to fit the screen.
+          </para></note>
         '';
       };
 
@@ -372,8 +385,9 @@ in
       };
 
       default = mkOption {
-        default = 0;
-        type = types.int;
+        default = "0";
+        type = types.either types.int types.str;
+        apply = toString;
         description = ''
           Index of the default menu item to be booted.
         '';
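Since boot.loader.grub.default now accepts either an int or a string (normalised with toString), both forms below are valid; the values are purely illustrative:

    boot.loader.grub.default = 2;        # coerced to "2"
    # or a non-numeric GRUB menu entry identifier:
    boot.loader.grub.default = "saved";
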
diff --git a/nixos/modules/system/boot/loader/grub/install-grub.pl b/nixos/modules/system/boot/loader/grub/install-grub.pl
index 8bd203106f55..d1ff6e6bf525 100644
--- a/nixos/modules/system/boot/loader/grub/install-grub.pl
+++ b/nixos/modules/system/boot/loader/grub/install-grub.pl
@@ -54,7 +54,7 @@ my $splashImage = get("splashImage");
 my $configurationLimit = int(get("configurationLimit"));
 my $copyKernels = get("copyKernels") eq "true";
 my $timeout = int(get("timeout"));
-my $defaultEntry = int(get("default"));
+my $defaultEntry = get("default");
 my $fsIdentifier = get("fsIdentifier");
 my $grubEfi = get("grubEfi");
 my $grubTargetEfi = get("grubTargetEfi");
@@ -281,30 +281,36 @@ else {
         else
           insmod vbe
         fi
-        insmod font
-        if loadfont " . $grubBoot->path . "/converted-font.pf2; then
-          insmod gfxterm
-          if [ \"\${grub_platform}\" = \"efi\" ]; then
-            set gfxmode=$gfxmodeEfi
-            set gfxpayload=keep
-          else
-            set gfxmode=$gfxmodeBios
-            set gfxpayload=text
-          fi
-          terminal_output gfxterm
-        fi
     ";
 
     if ($font) {
         copy $font, "$bootPath/converted-font.pf2" or die "cannot copy $font to $bootPath\n";
+        $conf .= "
+            insmod font
+            if loadfont " . $grubBoot->path . "/converted-font.pf2; then
+              insmod gfxterm
+              if [ \"\${grub_platform}\" = \"efi\" ]; then
+                set gfxmode=$gfxmodeEfi
+                set gfxpayload=keep
+              else
+                set gfxmode=$gfxmodeBios
+                set gfxpayload=text
+              fi
+              terminal_output gfxterm
+            fi
+        ";
     }
     if ($splashImage) {
-        # FIXME: GRUB 1.97 doesn't resize the background image if it
-        # doesn't match the video resolution.
-        copy $splashImage, "$bootPath/background.png" or die "cannot copy $splashImage to $bootPath\n";
+        # Keeps the image's extension.
+        my ($filename, $dirs, $suffix) = fileparse($splashImage, qr"\..[^.]*$");
+        # The GRUB module for .jpg files is named jpeg.
+        if ($suffix eq ".jpg") {
+            $suffix = ".jpeg";
+        }
+        copy $splashImage, "$bootPath/background$suffix" or die "cannot copy $splashImage to $bootPath\n";
         $conf .= "
-            insmod png
-            if background_image " . $grubBoot->path . "/background.png; then
+            insmod " . substr($suffix, 1) . "
+            if background_image " . $grubBoot->path . "/background$suffix; then
               set color_normal=white/black
               set color_highlight=black/white
             else
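With the change above, a JPEG splash would (roughly) be copied to /boot/background.jpeg and loaded via insmod jpeg; a configuration sketch with an illustrative path:

    boot.loader.grub.splashImage = ./splash.jpg;  # copied as background.jpeg, loaded with `insmod jpeg`
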
diff --git a/nixos/modules/system/boot/stage-1-init.sh b/nixos/modules/system/boot/stage-1-init.sh
index 92e68b72664a..de8451bbe31b 100644
--- a/nixos/modules/system/boot/stage-1-init.sh
+++ b/nixos/modules/system/boot/stage-1-init.sh
@@ -251,6 +251,9 @@ checkFS() {
     # Skip fsck for bcachefs - not implemented yet.
     if [ "$fsType" = bcachefs ]; then return 0; fi
 
+    # Skip fsck for nilfs2 - not needed by design, and there is no fsck tool for this filesystem.
+    if [ "$fsType" = nilfs2 ]; then return 0; fi
+
     # Skip fsck for inherently readonly filesystems.
     if [ "$fsType" = squashfs ]; then return 0; fi
 
diff --git a/nixos/modules/system/boot/stage-1.nix b/nixos/modules/system/boot/stage-1.nix
index 55bb6d3449c5..6756f68cdf72 100644
--- a/nixos/modules/system/boot/stage-1.nix
+++ b/nixos/modules/system/boot/stage-1.nix
@@ -56,6 +56,12 @@ let
       left=("''${left[@]:3}")
       if [ -z ''${seen[$next]+x} ]; then
         seen[$next]=1
+
+        # Ignore the dynamic linker, which for some reason appears as a DT_NEEDED entry of glibc but isn't in glibc's RPATH.
+        case "$next" in
+          ld*.so.?) continue;;
+        esac
+
         IFS=: read -ra paths <<< $rpath
         res=
         for path in "''${paths[@]}"; do
diff --git a/nixos/modules/system/boot/systemd-lib.nix b/nixos/modules/system/boot/systemd-lib.nix
index ae9ee8811f77..8b37bf8d35d8 100644
--- a/nixos/modules/system/boot/systemd-lib.nix
+++ b/nixos/modules/system/boot/systemd-lib.nix
@@ -78,10 +78,16 @@ in rec {
     optional (badFields != [ ])
       "Systemd ${group} has extra fields [${concatStringsSep " " badFields}].";
 
-  checkUnitConfig = group: checks: v:
-    let errors = concatMap (c: c group v) checks; in
-    if errors == [] then true
-      else builtins.trace (concatStringsSep "\n" errors) false;
+  checkUnitConfig = group: checks: attrs: let
+    # This check is applied at the top-level type (attrsOf unitOption), so the
+    # actual unit options might contain attributes from mkOverride that we need
+    # to unwrap into plain values before checking them.
+    defs = mapAttrs (const (v:
+      if v._type or "" == "override" then v.content else v
+    )) attrs;
+    errors = concatMap (c: c group defs) checks;
+  in if errors == [] then true
+     else builtins.trace (concatStringsSep "\n" errors) false;
 
   toOption = x:
     if x == true then "true"
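For context on the unwrapping above: values defined with lib.mkForce (or lib.mkOverride) are wrapped attrsets, so the checks would otherwise see the wrapper instead of the actual value. A rough sketch of such a wrapper:

    # lib.mkForce "simple" evaluates approximately to:
    { _type = "override"; priority = 50; content = "simple"; }
    # checkUnitConfig now substitutes .content for such attrsets before running
    # the per-field checks.
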
diff --git a/nixos/modules/system/boot/timesyncd.nix b/nixos/modules/system/boot/timesyncd.nix
index f643723ab141..57853c5698d0 100644
--- a/nixos/modules/system/boot/timesyncd.nix
+++ b/nixos/modules/system/boot/timesyncd.nix
@@ -34,7 +34,7 @@ with lib;
 
     environment.etc."systemd/timesyncd.conf".text = ''
       [Time]
-      NTP=${concatStringsSep " " config.services.ntp.servers}
+      NTP=${concatStringsSep " " config.services.timesyncd.servers}
     '';
 
     users.extraUsers.systemd-timesync.uid = config.ids.uids.systemd-timesync;
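With the fix above, timesyncd now reads its own server list; a minimal sketch (server names illustrative):

    services.timesyncd = {
      enable = true;
      servers = [ "0.nixos.pool.ntp.org" "1.nixos.pool.ntp.org" ];
    };
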
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
index c3bf897d51fd..bfcd81d62159 100644
--- a/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -171,8 +171,12 @@ in
         default = config.boot.zfs.enableUnstable;
         description = ''
           Request encryption keys or passwords for all encrypted datasets on import.
-
           Dataset encryption is only supported in zfsUnstable at the moment.
+          For root pools the encryption key can be supplied either via an
+          interactive prompt (keylocation=prompt) or from a file
+          (keylocation=file://). Note that for data pools the encryption key
+          can only be loaded from a file, not via an interactive prompt, since
+          the import happens in a background systemd service.
         '';
       };
 
@@ -394,6 +398,7 @@ in
             script = ''
               zpool_cmd="${packages.zfsUser}/sbin/zpool"
               ("$zpool_cmd" list "${pool}" >/dev/null) || "$zpool_cmd" import -d ${cfgZfs.devNodes} -N ${optionalString cfgZfs.forceImportAll "-f"} "${pool}"
+              ${optionalString cfgZfs.requestEncryptionCredentials "\"${packages.zfsUser}/sbin/zfs\" load-key -r \"${pool}\""}
             '';
           };
 
@@ -403,6 +408,9 @@ in
           nameValuePair "zfs-sync-${pool}" {
             description = "Sync ZFS pool \"${pool}\"";
             wantedBy = [ "shutdown.target" ];
+            unitConfig = {
+              DefaultDependencies = false;
+            };
             serviceConfig = {
               Type = "oneshot";
               RemainAfterExit = true;
@@ -411,12 +419,15 @@ in
               ${packages.zfsUser}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
             '';
           };
+        createZfsService = serv:
+          nameValuePair serv {
+            after = [ "systemd-modules-load.service" ];
+            wantedBy = [ "zfs.target" ];
+          };
 
-      in listToAttrs (map createImportService dataPools ++ map createSyncService allPools) // {
-        "zfs-mount" = { after = [ "systemd-modules-load.service" ]; };
-        "zfs-share" = { after = [ "systemd-modules-load.service" ]; };
-        "zfs-zed" = { after = [ "systemd-modules-load.service" ]; };
-      };
+      in listToAttrs (map createImportService dataPools ++
+                      map createSyncService allPools ++
+                      map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);
 
       systemd.targets."zfs-import" =
         let
@@ -425,6 +436,7 @@ in
           {
             requires = services;
             after = services;
+            wantedBy = [ "zfs.target" ];
           };
 
       systemd.targets."zfs".wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/tasks/network-interfaces-scripted.nix b/nixos/modules/tasks/network-interfaces-scripted.nix
index e754a1e8718d..c4a2bd1f75fd 100644
--- a/nixos/modules/tasks/network-interfaces-scripted.nix
+++ b/nixos/modules/tasks/network-interfaces-scripted.nix
@@ -209,7 +209,7 @@ let
                   ''
                      echo "${cidr}" >> $state
                      echo -n "adding route ${cidr}... "
-                     if out=$(ip route add "${cidr}" ${options} ${via} dev "${i.name}" 2>&1); then
+                     if out=$(ip route add "${cidr}" ${options} ${via} dev "${i.name}" proto static 2>&1); then
                        echo "done"
                      elif ! echo "$out" | grep "File exists" >/dev/null 2>&1; then
                        echo "'ip route add "${cidr}" ${options} ${via} dev "${i.name}"' failed: $out"
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index 14f9b9567515..a3534e10bb17 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -46,22 +46,6 @@ let
     '';
   });
 
-  # Collect all interfaces that are defined for a device
-  # as device:interface key:value pairs.
-  wlanDeviceInterfaces =
-    let
-      allDevices = unique (mapAttrsToList (_: v: v.device) cfg.wlanInterfaces);
-      interfacesOfDevice = d: filterAttrs (_: v: v.device == d) cfg.wlanInterfaces;
-    in
-      genAttrs allDevices (d: interfacesOfDevice d);
-
-  # Convert device:interface key:value pairs into a list, and if it exists,
-  # place the interface which is named after the device at the beginning.
-  wlanListDeviceFirst = device: interfaces:
-    if hasAttr device interfaces
-    then mapAttrsToList (n: v: v//{_iName=n;}) (filterAttrs (n: _: n==device) interfaces) ++ mapAttrsToList (n: v: v//{_iName=n;}) (filterAttrs (n: _: n!=device) interfaces)
-    else mapAttrsToList (n: v: v // {_iName = n;}) interfaces;
-
   # We must escape interfaces due to the systemd interpretation
   subsystemDevice = interface:
     "sys-subsystem-net-devices-${escapeSystemdPath interface}.device";
diff --git a/nixos/modules/tasks/scsi-link-power-management.nix b/nixos/modules/tasks/scsi-link-power-management.nix
index 484c0a0186d7..69599bda6d32 100644
--- a/nixos/modules/tasks/scsi-link-power-management.nix
+++ b/nixos/modules/tasks/scsi-link-power-management.nix
@@ -2,7 +2,20 @@
 
 with lib;
 
-let cfg = config.powerManagement.scsiLinkPolicy; in
+let
+
+  cfg = config.powerManagement.scsiLinkPolicy;
+
+  kernel = config.boot.kernelPackages.kernel;
+
+  allowedValues = [
+    "min_power"
+    "max_performance"
+    "medium_power"
+    "med_power_with_dipm"
+  ];
+
+in
 
 {
   ###### interface
@@ -11,10 +24,13 @@ let cfg = config.powerManagement.scsiLinkPolicy; in
 
     powerManagement.scsiLinkPolicy = mkOption {
       default = null;
-      type = types.nullOr (types.enum [ "min_power" "max_performance" "medium_power" ]);
+      type = types.nullOr (types.enum allowedValues);
       description = ''
         SCSI link power management policy. The kernel default is
         "max_performance".
+        </para><para>
+        "med_power_with_dipm" is supported by kernel versions
+        4.15 and newer.
       '';
     };
 
@@ -24,6 +40,12 @@ let cfg = config.powerManagement.scsiLinkPolicy; in
   ###### implementation
 
   config = mkIf (cfg != null) {
+
+    assertions = singleton {
+      assertion = (cfg == "med_power_with_dipm") -> versionAtLeast kernel.version "4.15";
+      message = "med_power_with_dipm is not supported for kernels older than 4.15";
+    };
+
     services.udev.extraRules = ''
       SUBSYSTEM=="scsi_host", ACTION=="add", KERNEL=="host*", ATTR{link_power_management_policy}="${cfg}"
     '';
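Usage sketch for the newly allowed policy value; the assertion above rejects it on kernels older than 4.15:

    powerManagement.scsiLinkPolicy = "med_power_with_dipm";
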
diff --git a/nixos/modules/tasks/swraid.nix b/nixos/modules/tasks/swraid.nix
index d6cb1c96ef46..1b142fb8fd36 100644
--- a/nixos/modules/tasks/swraid.nix
+++ b/nixos/modules/tasks/swraid.nix
@@ -6,7 +6,7 @@
 
   services.udev.packages = [ pkgs.mdadm ];
 
-  boot.initrd.availableKernelModules = [ "md_mod" "raid0" "raid1" "raid456" ];
+  boot.initrd.availableKernelModules = [ "md_mod" "raid0" "raid1" "raid10" "raid456" ];
 
   boot.initrd.extraUdevRulesCommands = ''
     cp -v ${pkgs.mdadm}/lib/udev/rules.d/*.rules $out/
diff --git a/nixos/modules/virtualisation/azure-image.nix b/nixos/modules/virtualisation/azure-image.nix
index cb756842f369..dd2108ccc379 100644
--- a/nixos/modules/virtualisation/azure-image.nix
+++ b/nixos/modules/virtualisation/azure-image.nix
@@ -2,13 +2,13 @@
 
 with lib;
 let
-  diskSize = 30720;
+  diskSize = 2048;
 in
 {
   system.build.azureImage = import ../../lib/make-disk-image.nix {
     name = "azure-image";
     postVM = ''
-      ${pkgs.vmTools.qemu-220}/bin/qemu-img convert -f raw -o subformat=fixed -O vpc $diskImage $out/disk.vhd
+      ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o subformat=fixed,force_size -O vpc $diskImage $out/disk.vhd
     '';
     configFile = ./azure-config-user.nix;
     format = "raw";
diff --git a/nixos/modules/virtualisation/azure-qemu-220-no-etc-install.patch b/nixos/modules/virtualisation/azure-qemu-220-no-etc-install.patch
deleted file mode 100644
index 81d29feea3de..000000000000
--- a/nixos/modules/virtualisation/azure-qemu-220-no-etc-install.patch
+++ /dev/null
@@ -1,14 +0,0 @@
-diff --git a/Makefile b/Makefile
-index d6b9dc1..ce7c493 100644
---- a/Makefile
-+++ b/Makefile
-@@ -384,8 +384,7 @@ install-confdir:
- install-sysconfig: install-datadir install-confdir
- 	$(INSTALL_DATA) $(SRC_PATH)/sysconfigs/target/target-x86_64.conf "$(DESTDIR)$(qemu_confdir)"
- 
--install: all $(if $(BUILD_DOCS),install-doc) install-sysconfig \
--install-datadir install-localstatedir
-+install: all $(if $(BUILD_DOCS),install-doc) install-datadir
- ifneq ($(TOOLS),)
- 	$(call install-prog,$(TOOLS),$(DESTDIR)$(bindir))
- endif
diff --git a/nixos/modules/virtualisation/gce-images.nix b/nixos/modules/virtualisation/gce-images.nix
index 8a9bda1b60c2..575bbaadbcdb 100644
--- a/nixos/modules/virtualisation/gce-images.nix
+++ b/nixos/modules/virtualisation/gce-images.nix
@@ -3,6 +3,7 @@ let self = {
   "15.09" = "gs://nixos-cloud-images/nixos-15.09.425.7870f20-x86_64-linux.raw.tar.gz";
   "16.03" = "gs://nixos-cloud-images/nixos-image-16.03.847.8688c17-x86_64-linux.raw.tar.gz";
   "17.03" = "gs://nixos-cloud-images/nixos-image-17.03.1082.4aab5c5798-x86_64-linux.raw.tar.gz";
+  "18.03" = "gs://nixos-cloud-images/nixos-image-18.03.132536.fdb5ba4cdf9-x86_64-linux.raw.tar.gz";
 
-  latest = self."17.03";
+  latest = self."18.03";
 }; in self
diff --git a/nixos/modules/virtualisation/google-compute-image.nix b/nixos/modules/virtualisation/google-compute-image.nix
index 374a84332357..de2c43b8a40a 100644
--- a/nixos/modules/virtualisation/google-compute-image.nix
+++ b/nixos/modules/virtualisation/google-compute-image.nix
@@ -257,7 +257,7 @@ in
               echo "Setup of ssh host keys from http://metadata.google.internal/computeMetadata/v1/instance/attributes/ failed."
               false
           fi
-          rm -f $SSH_HOST_KEYS_DIR
+          rm -rf $SSH_HOST_KEYS_DIR
         '';
       serviceConfig.Type = "oneshot";
       serviceConfig.RemainAfterExit = true;
diff --git a/nixos/modules/virtualisation/kvmgt.nix b/nixos/modules/virtualisation/kvmgt.nix
new file mode 100644
index 000000000000..fc0bedb68bd0
--- /dev/null
+++ b/nixos/modules/virtualisation/kvmgt.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.kvmgt;
+  kernelPackages = config.boot.kernelPackages;
+  vgpuOptions = {
+    uuid = mkOption {
+      type = types.string;
+      description = "UUID of VGPU device. You can generate one with <package>libossp_uuid</package>.";
+    };
+  };
+in {
+  options = {
+    virtualisation.kvmgt = {
+      enable = mkEnableOption ''
+        KVMGT (iGVT-g) VGPU support. Allows QEMU/KVM guests to share the host's Intel integrated graphics card.
+        Currently only one graphics device can be shared
+      '';
+      # multi-GPU support is an open question
+      device = mkOption {
+        type = types.string;
+        default = "0000:00:02.0";
+        description = "PCI ID of graphics card. You can figure it with <command>ls /sys/class/mdev_bus</command>.";
+      };
+      vgpus = mkOption {
+        default = {};
+        type = with types; attrsOf (submodule [ { options = vgpuOptions; } ]);
+        description = ''
+          Virtual GPUs to be used in QEMU. You can list the available device types with <command>ls /sys/bus/pci/devices/*/mdev_supported_types</command>
+          and inspect a type with <command>cat /sys/bus/pci/devices/*/mdev_supported_types/i915-GVTg_V5_4/description</command>.
+        '';
+        example = {
+          "i915-GVTg_V5_8" = {
+            uuid = "a297db4a-f4c2-11e6-90f6-d3b88d6c9525";
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = singleton {
+      assertion = versionAtLeast kernelPackages.kernel.version "4.16";
+      message = "KVMGT is not properly supported for kernels older than 4.16";
+    };
+    boot.kernelParams = [ "i915.enable_gvt=1" ];
+    systemd.services = mapAttrs' (name: value:
+      nameValuePair "kvmgt-${name}" {
+        description = "KVMGT VGPU ${name}";
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+          ExecStart = "${pkgs.runtimeShell} -c 'echo ${value.uuid} > /sys/bus/pci/devices/${cfg.device}/mdev_supported_types/${name}/create'";
+          ExecStop = "${pkgs.runtimeShell} -c 'echo 1 > /sys/bus/pci/devices/${cfg.device}/${value.uuid}/remove'";
+        };
+        wantedBy = [ "multi-user.target" ];
+      }
+    ) cfg.vgpus;
+  };
+
+  meta.maintainers = with maintainers; [ gnidorah ];
+}
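A minimal sketch of the new kvmgt module, reusing the UUID and device type from the option example above:

    virtualisation.kvmgt = {
      enable = true;
      # device defaults to "0000:00:02.0"
      vgpus."i915-GVTg_V5_8".uuid = "a297db4a-f4c2-11e6-90f6-d3b88d6c9525";
    };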