Diffstat (limited to 'nixos/modules/services')
-rw-r--r--  nixos/modules/services/audio/alsa.nix | 2
-rw-r--r--  nixos/modules/services/audio/mopidy.nix | 14
-rw-r--r--  nixos/modules/services/backup/almir.nix | 173
-rw-r--r--  nixos/modules/services/backup/borgbackup.nix | 580
-rw-r--r--  nixos/modules/services/backup/crashplan-small-business.nix | 74
-rw-r--r--  nixos/modules/services/backup/duplicati.nix | 40
-rw-r--r--  nixos/modules/services/backup/restic.nix | 150
-rw-r--r--  nixos/modules/services/backup/tarsnap.nix | 69
-rw-r--r--  nixos/modules/services/backup/znapzend.nix | 369
-rw-r--r--  nixos/modules/services/cluster/kubernetes/dashboard.nix | 4
-rw-r--r--  nixos/modules/services/cluster/kubernetes/default.nix | 22
-rw-r--r--  nixos/modules/services/computing/slurm/slurm.nix | 31
-rw-r--r--  nixos/modules/services/continuous-integration/buildkite-agent.nix | 181
-rw-r--r--  nixos/modules/services/continuous-integration/hydra/default.nix | 13
-rw-r--r--  nixos/modules/services/continuous-integration/jenkins/default.nix | 9
-rw-r--r--  nixos/modules/services/databases/4store-endpoint.nix | 2
-rw-r--r--  nixos/modules/services/databases/4store.nix | 2
-rw-r--r--  nixos/modules/services/databases/memcached.nix | 57
-rw-r--r--  nixos/modules/services/databases/mysql.nix | 23
-rw-r--r--  nixos/modules/services/databases/openldap.nix | 52
-rw-r--r--  nixos/modules/services/databases/pgmanage.nix | 6
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 7
-rw-r--r--  nixos/modules/services/databases/redis.nix | 1
-rw-r--r--  nixos/modules/services/desktops/gnome3/at-spi2-core.nix | 21
-rw-r--r--  nixos/modules/services/desktops/gnome3/chrome-gnome-shell.nix | 27
-rw-r--r--  nixos/modules/services/desktops/gnome3/evolution-data-server.nix | 6
-rw-r--r--  nixos/modules/services/desktops/gnome3/gnome-keyring.nix | 4
-rw-r--r--  nixos/modules/services/desktops/gnome3/gnome-online-accounts.nix | 4
-rw-r--r--  nixos/modules/services/desktops/gnome3/gnome-terminal-server.nix | 6
-rw-r--r--  nixos/modules/services/desktops/gnome3/tracker-miners.nix | 41
-rw-r--r--  nixos/modules/services/desktops/pipewire.nix | 23
-rw-r--r--  nixos/modules/services/desktops/telepathy.nix | 4
-rw-r--r--  nixos/modules/services/editors/emacs.nix | 23
-rw-r--r--  nixos/modules/services/games/factorio.nix | 20
-rw-r--r--  nixos/modules/services/games/ghost-one.nix | 105
-rw-r--r--  nixos/modules/services/hardware/acpid.nix | 40
-rw-r--r--  nixos/modules/services/hardware/amd-hybrid-graphics.nix | 46
-rw-r--r--  nixos/modules/services/hardware/bluetooth.nix | 12
-rw-r--r--  nixos/modules/services/hardware/fwupd.nix | 90
-rw-r--r--  nixos/modules/services/hardware/nvidia-optimus.nix | 2
-rw-r--r--  nixos/modules/services/hardware/thinkfan.nix | 2
-rw-r--r--  nixos/modules/services/hardware/trezord.nix | 2
-rw-r--r--  nixos/modules/services/hardware/u2f.nix | 23
-rw-r--r--  nixos/modules/services/hardware/udev.nix | 2
-rw-r--r--  nixos/modules/services/hardware/usbmuxd.nix | 74
-rw-r--r--  nixos/modules/services/logging/graylog.nix | 2
-rw-r--r--  nixos/modules/services/logging/logcheck.nix | 2
-rw-r--r--  nixos/modules/services/logging/logstash.nix | 2
-rw-r--r--  nixos/modules/services/mail/clamsmtp.nix | 179
-rw-r--r--  nixos/modules/services/mail/dkimproxy-out.nix | 118
-rw-r--r--  nixos/modules/services/mail/dovecot.nix | 5
-rw-r--r--  nixos/modules/services/mail/postfix.nix | 145
-rw-r--r--  nixos/modules/services/mail/rspamd.nix | 268
-rw-r--r--  nixos/modules/services/misc/defaultUnicornConfig.rb | 240
-rw-r--r--  nixos/modules/services/misc/disnix.nix | 16
-rw-r--r--  nixos/modules/services/misc/dysnomia.nix | 68
-rw-r--r--  nixos/modules/services/misc/folding-at-home.nix | 2
-rw-r--r--  nixos/modules/services/misc/geoip-updater.nix | 2
-rw-r--r--  nixos/modules/services/misc/gitea.nix | 56
-rw-r--r--  nixos/modules/services/misc/gitit.nix | 2
-rw-r--r--  nixos/modules/services/misc/gitlab.nix | 26
-rw-r--r--  nixos/modules/services/misc/gitolite.nix | 2
-rw-r--r--  nixos/modules/services/misc/gitweb.nix | 59
-rw-r--r--  nixos/modules/services/misc/gogs.nix | 3
-rw-r--r--  nixos/modules/services/misc/gollum.nix | 21
-rw-r--r--  nixos/modules/services/misc/home-assistant.nix | 144
-rw-r--r--  nixos/modules/services/misc/ihaskell.nix | 2
-rw-r--r--  nixos/modules/services/misc/logkeys.nix | 2
-rw-r--r--  nixos/modules/services/misc/matrix-synapse.nix | 102
-rw-r--r--  nixos/modules/services/misc/mbpfan.nix | 8
-rw-r--r--  nixos/modules/services/misc/mesos-slave.nix | 4
-rw-r--r--  nixos/modules/services/misc/nix-daemon.nix | 69
-rw-r--r--  nixos/modules/services/misc/nix-ssh-serve.nix | 24
-rw-r--r--  nixos/modules/services/misc/nixos-manual.nix | 18
-rw-r--r--  nixos/modules/services/misc/novacomd.nix | 31
-rw-r--r--  nixos/modules/services/misc/osrm.nix | 85
-rw-r--r--  nixos/modules/services/misc/parsoid.nix | 4
-rw-r--r--  nixos/modules/services/misc/safeeyes.nix | 50
-rw-r--r--  nixos/modules/services/misc/serviio.nix | 92
-rw-r--r--  nixos/modules/services/misc/ssm-agent.nix | 4
-rw-r--r--  nixos/modules/services/misc/xmr-stak.nix | 73
-rw-r--r--  nixos/modules/services/misc/zookeeper.nix | 11
-rw-r--r--  nixos/modules/services/monitoring/apcupsd.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/fusion-inventory.nix | 3
-rw-r--r--  nixos/modules/services/monitoring/grafana.nix | 20
-rw-r--r--  nixos/modules/services/monitoring/monit.nix | 16
-rw-r--r--  nixos/modules/services/monitoring/munin.nix | 68
-rw-r--r--  nixos/modules/services/monitoring/netdata.nix | 54
-rw-r--r--  nixos/modules/services/monitoring/prometheus/alertmanager.nix | 10
-rw-r--r--  nixos/modules/services/monitoring/prometheus/blackbox-exporter.nix | 68
-rw-r--r--  nixos/modules/services/monitoring/prometheus/collectd-exporter.nix | 128
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters.nix | 173
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters.xml | 135
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix | 31
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/collectd.nix | 78
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix | 50
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix | 39
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/json.nix | 36
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/minio.nix | 65
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/nginx.nix | 31
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/node.nix | 39
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/postfix.nix | 81
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/snmp.nix | 71
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/unifi.nix | 67
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/varnish.nix | 21
-rw-r--r--  nixos/modules/services/monitoring/prometheus/fritzbox-exporter.nix | 76
-rw-r--r--  nixos/modules/services/monitoring/prometheus/json-exporter.nix | 74
-rw-r--r--  nixos/modules/services/monitoring/prometheus/minio-exporter.nix | 117
-rw-r--r--  nixos/modules/services/monitoring/prometheus/nginx-exporter.nix | 78
-rw-r--r--  nixos/modules/services/monitoring/prometheus/node-exporter.nix | 87
-rw-r--r--  nixos/modules/services/monitoring/prometheus/snmp-exporter.nix | 127
-rw-r--r--  nixos/modules/services/monitoring/prometheus/unifi-exporter.nix | 105
-rw-r--r--  nixos/modules/services/monitoring/prometheus/varnish-exporter.nix | 61
-rw-r--r--  nixos/modules/services/monitoring/smartd.nix | 20
-rw-r--r--  nixos/modules/services/monitoring/statsd.nix | 31
-rw-r--r--  nixos/modules/services/network-filesystems/beegfs.nix | 343
-rw-r--r--  nixos/modules/services/network-filesystems/ceph.nix | 371
-rw-r--r--  nixos/modules/services/network-filesystems/davfs2.nix | 74
-rw-r--r--  nixos/modules/services/network-filesystems/ipfs.nix | 14
-rw-r--r--  nixos/modules/services/network-filesystems/openafs-client/default.nix | 99
-rw-r--r--  nixos/modules/services/network-filesystems/openafs/client.nix | 239
-rw-r--r--  nixos/modules/services/network-filesystems/openafs/lib.nix | 28
-rw-r--r--  nixos/modules/services/network-filesystems/openafs/server.nix | 260
-rw-r--r--  nixos/modules/services/network-filesystems/samba.nix | 14
-rw-r--r--  nixos/modules/services/network-filesystems/xtreemfs.nix | 2
-rw-r--r--  nixos/modules/services/network-filesystems/yandex-disk.nix | 4
-rw-r--r--  nixos/modules/services/networking/amuled.nix | 2
-rw-r--r--  nixos/modules/services/networking/aria2.nix | 10
-rw-r--r--  nixos/modules/services/networking/bird.nix | 23
-rw-r--r--  nixos/modules/services/networking/connman.nix | 11
-rw-r--r--  nixos/modules/services/networking/dante.nix | 2
-rw-r--r--  nixos/modules/services/networking/ddclient.nix | 112
-rw-r--r--  nixos/modules/services/networking/dhcpcd.nix | 6
-rw-r--r--  nixos/modules/services/networking/dhcpd.nix | 1
-rw-r--r--  nixos/modules/services/networking/dnscache.nix | 31
-rw-r--r--  nixos/modules/services/networking/dnscrypt-proxy.nix | 6
-rw-r--r--  nixos/modules/services/networking/dnscrypt-wrapper.nix | 10
-rw-r--r--  nixos/modules/services/networking/firefox/sync-server.nix | 52
-rw-r--r--  nixos/modules/services/networking/firewall.nix | 5
-rw-r--r--  nixos/modules/services/networking/flashpolicyd.nix | 2
-rw-r--r--  nixos/modules/services/networking/freeradius.nix | 72
-rw-r--r--  nixos/modules/services/networking/gnunet.nix | 2
-rw-r--r--  nixos/modules/services/networking/i2pd.nix | 17
-rw-r--r--  nixos/modules/services/networking/iwd.nix | 2
-rw-r--r--  nixos/modules/services/networking/kresd.nix | 48
-rw-r--r--  nixos/modules/services/networking/lldpd.nix | 1
-rw-r--r--  nixos/modules/services/networking/monero.nix | 238
-rw-r--r--  nixos/modules/services/networking/mosquitto.nix | 16
-rw-r--r--  nixos/modules/services/networking/murmur.nix | 3
-rw-r--r--  nixos/modules/services/networking/nat.nix | 80
-rw-r--r--  nixos/modules/services/networking/networkmanager.nix | 48
-rw-r--r--  nixos/modules/services/networking/nftables.nix | 2
-rw-r--r--  nixos/modules/services/networking/nix-serve.nix | 2
-rw-r--r--  nixos/modules/services/networking/nixops-dns.nix | 79
-rw-r--r--  nixos/modules/services/networking/nsd.nix | 6
-rw-r--r--  nixos/modules/services/networking/openvpn.nix | 30
-rw-r--r--  nixos/modules/services/networking/prosody.nix | 337
-rw-r--r--  nixos/modules/services/networking/quagga.nix | 2
-rw-r--r--  nixos/modules/services/networking/radvd.nix | 15
-rw-r--r--  nixos/modules/services/networking/rdnssd.nix | 2
-rw-r--r--  nixos/modules/services/networking/resilio.nix | 9
-rw-r--r--  nixos/modules/services/networking/rxe.nix | 63
-rw-r--r--  nixos/modules/services/networking/shadowsocks.nix | 112
-rw-r--r--  nixos/modules/services/networking/softether.nix | 33
-rw-r--r--  nixos/modules/services/networking/ssh/sshd.nix | 94
-rw-r--r--  nixos/modules/services/networking/strongswan.nix | 23
-rw-r--r--  nixos/modules/services/networking/stunnel.nix | 221
-rw-r--r--  nixos/modules/services/networking/syncthing.nix | 30
-rw-r--r--  nixos/modules/services/networking/tcpcrypt.nix | 10
-rw-r--r--  nixos/modules/services/networking/tinc.nix | 2
-rw-r--r--  nixos/modules/services/networking/tox-bootstrapd.nix | 2
-rw-r--r--  nixos/modules/services/networking/unbound.nix | 2
-rw-r--r--  nixos/modules/services/networking/zerotierone.nix | 22
-rw-r--r--  nixos/modules/services/printing/cupsd.nix | 7
-rw-r--r--  nixos/modules/services/scheduling/fcron.nix | 28
-rw-r--r--  nixos/modules/services/search/elasticsearch.nix | 15
-rw-r--r--  nixos/modules/services/security/clamav.nix | 13
-rw-r--r--  nixos/modules/services/security/hologram-agent.nix | 7
-rw-r--r--  nixos/modules/services/security/hologram-server.nix | 38
-rw-r--r--  nixos/modules/services/security/physlock.nix | 64
-rw-r--r--  nixos/modules/services/security/tor.nix | 138
-rw-r--r--  nixos/modules/services/security/torify.nix | 2
-rw-r--r--  nixos/modules/services/security/torsocks.nix | 2
-rw-r--r--  nixos/modules/services/security/usbguard.nix | 4
-rw-r--r--  nixos/modules/services/system/localtime.nix | 60
-rw-r--r--  nixos/modules/services/torrent/transmission.nix | 22
-rw-r--r--  nixos/modules/services/ttys/agetty.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/atlassian/jira.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/matomo-doc.xml (renamed from nixos/modules/services/web-apps/piwik-doc.xml) | 42
-rw-r--r--  nixos/modules/services/web-apps/matomo.nix (renamed from nixos/modules/services/web-apps/piwik.nix) | 75
-rw-r--r--  nixos/modules/services/web-apps/nexus.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/nixbot.nix | 149
-rw-r--r--  nixos/modules/services/web-apps/pump.io-configure.js | 23
-rw-r--r--  nixos/modules/services/web-apps/pump.io.nix | 438
-rw-r--r--  nixos/modules/services/web-apps/restya-board.nix | 384
-rw-r--r--  nixos/modules/services/web-apps/tt-rss.nix | 73
-rw-r--r--  nixos/modules/services/web-servers/apache-httpd/owncloud.nix | 48
-rw-r--r--  nixos/modules/services/web-servers/apache-httpd/per-server-options.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/lighttpd/default.nix | 3
-rw-r--r--  nixos/modules/services/web-servers/lighttpd/gitweb.nix | 35
-rw-r--r--  nixos/modules/services/web-servers/lighttpd/inginious.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/mighttpd2.nix | 132
-rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix | 28
-rw-r--r--  nixos/modules/services/web-servers/nginx/gitweb.nix | 61
-rw-r--r--  nixos/modules/services/web-servers/nginx/vhost-options.nix | 30
-rw-r--r--  nixos/modules/services/web-servers/tomcat.nix | 382
-rw-r--r--  nixos/modules/services/web-servers/traefik.nix | 12
-rw-r--r--  nixos/modules/services/web-servers/varnish/default.nix | 30
-rw-r--r--  nixos/modules/services/x11/compton.nix | 4
-rw-r--r--  nixos/modules/services/x11/desktop-managers/default.nix | 12
-rw-r--r--  nixos/modules/services/x11/desktop-managers/enlightenment.nix | 4
-rw-r--r--  nixos/modules/services/x11/desktop-managers/gnome3.nix | 24
-rw-r--r--  nixos/modules/services/x11/desktop-managers/lxqt.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/mate.nix | 50
-rw-r--r--  nixos/modules/services/x11/desktop-managers/plasma5.nix | 36
-rw-r--r--  nixos/modules/services/x11/desktop-managers/xfce.nix | 146
-rw-r--r--  nixos/modules/services/x11/display-managers/default.nix | 45
-rw-r--r--  nixos/modules/services/x11/display-managers/gdm.nix | 13
-rw-r--r--  nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix | 35
-rw-r--r--  nixos/modules/services/x11/display-managers/lightdm.nix | 23
-rw-r--r--  nixos/modules/services/x11/display-managers/sddm.nix | 2
-rw-r--r--  nixos/modules/services/x11/display-managers/slim.nix | 2
-rw-r--r--  nixos/modules/services/x11/display-managers/xpra.nix | 2
-rw-r--r--  nixos/modules/services/x11/hardware/libinput.nix | 9
-rw-r--r--  nixos/modules/services/x11/window-managers/2bwm.nix | 4
-rw-r--r--  nixos/modules/services/x11/window-managers/awesome.nix | 11
-rw-r--r--  nixos/modules/services/x11/window-managers/default.nix | 5
-rw-r--r--  nixos/modules/services/x11/window-managers/evilwm.nix | 25
-rw-r--r--  nixos/modules/services/x11/xautolock.nix | 28
-rw-r--r--  nixos/modules/services/x11/xserver.nix | 46
230 files changed, 9135 insertions, 3559 deletions
diff --git a/nixos/modules/services/audio/alsa.nix b/nixos/modules/services/audio/alsa.nix
index acf48d3c3d03..e3e8bb28c58b 100644
--- a/nixos/modules/services/audio/alsa.nix
+++ b/nixos/modules/services/audio/alsa.nix
@@ -21,7 +21,7 @@ in
 
       enable = mkOption {
         type = types.bool;
-        default = true;
+        default = false;
         description = ''
           Whether to enable ALSA sound.
         '';
diff --git a/nixos/modules/services/audio/mopidy.nix b/nixos/modules/services/audio/mopidy.nix
index c0a0f0374294..52613d450b51 100644
--- a/nixos/modules/services/audio/mopidy.nix
+++ b/nixos/modules/services/audio/mopidy.nix
@@ -4,17 +4,22 @@ with pkgs;
 with lib;
 
 let
-
   uid = config.ids.uids.mopidy;
   gid = config.ids.gids.mopidy;
   cfg = config.services.mopidy;
 
   mopidyConf = writeText "mopidy.conf" cfg.configuration;
 
-  mopidyEnv = python.buildEnv.override {
-    extraLibs = [ mopidy ] ++ cfg.extensionPackages;
+  mopidyEnv = buildEnv {
+    name = "mopidy-with-extensions-${mopidy.version}";
+    paths = closePropagation cfg.extensionPackages;
+    pathsToLink = [ "/${python.sitePackages}" ];
+    buildInputs = [ makeWrapper ];
+    postBuild = ''
+      makeWrapper ${mopidy}/bin/mopidy $out/bin/mopidy \
+        --prefix PYTHONPATH : $out/${python.sitePackages}
+    '';
   };
-
 in {
 
   options = {
@@ -61,7 +66,6 @@ in {
 
   };
 
-
   ###### implementation
 
   config = mkIf cfg.enable {
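
As a usage sketch for the wrapped Mopidy environment above (the extension package name is illustrative, not taken from this diff), the relevant options are enable, extensionPackages and configuration:

    services.mopidy = {
      enable = true;
      # Extensions end up on PYTHONPATH via the makeWrapper call above.
      extensionPackages = [ pkgs.mopidy-moped ];  # illustrative extension package
      configuration = ''
        [mpd]
        hostname = 127.0.0.1
      '';
    };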
diff --git a/nixos/modules/services/backup/almir.nix b/nixos/modules/services/backup/almir.nix
deleted file mode 100644
index fbb4ff4034f1..000000000000
--- a/nixos/modules/services/backup/almir.nix
+++ /dev/null
@@ -1,173 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-  cfg = config.services.almir;
-
-  bconsoleconf = pkgs.writeText "bconsole.conf"
-    ''
-      Director {
-        Name = ${cfg.director_name}
-        DIRport = ${toString cfg.director_port}
-        address = ${cfg.director_address}
-        Password = "${cfg.director_password}"
-      }
-    '';
-
-  productionini = pkgs.writeText "production.ini"
-    ''
-[app:main]
-use = egg:almir
-
-pyramid.reload_templates = false
-pyramid.debug_authorization = false
-pyramid.debug_notfound = false
-pyramid.debug_routematch = false
-pyramid.debug_templates = false
-pyramid.default_locale_name = en
-pyramid.includes =
-    pyramid_exclog
-exclog.extra_info = true
-
-sqlalchemy.url = ${cfg.sqlalchemy_engine_url}
-timezone = ${cfg.timezone}
-bconsole_config = ${bconsoleconf}
-
-[server:main]
-use = egg:waitress#main
-host = 127.0.0.1
-port = ${toString cfg.port}
-
-
-# Begin logging configuration
-
-[loggers]
-keys = root, almir, sqlalchemy, exc_logger
-
-[handlers]
-keys = console
-
-[formatters]
-keys = generic
-
-[logger_root]
-level = WARN
-handlers = console
-
-[logger_almir]
-level = WARN
-handlers =
-qualname = almir
-
-[logger_exc_logger]
-level = ERROR
-handlers =
-qualname = exc_logger
-
-[logger_sqlalchemy]
-level = WARN
-handlers =
-qualname = sqlalchemy.engine
-# "level = INFO" logs SQL queries.
-# "level = DEBUG" logs SQL queries and results.
-# "level = WARN" logs neither.  (Recommended for production systems.)
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatter_generic]
-format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s
-    '';
-in {
-  options = {
-    services.almir = {
-      enable = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Enable Almir web server. Also configures postgresql database and installs bacula.
-        '';
-      };
-
-      port = mkOption {
-        default = 35000;
-        type = types.int;
-        description = ''
-          Port for Almir web server to listen on.
-        '';
-      };
-
-      timezone = mkOption {
-	description = ''
-         Timezone as specified in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
-        '';
-        example = "Europe/Ljubljana";
-      };
-
-      sqlalchemy_engine_url = mkOption {
-        default = "postgresql:///bacula";
-        example = ''
-          postgresql://bacula:bacula@localhost:5432/bacula
-          mysql+mysqlconnector://<user>:<password>@<hostname>/<database>'
-          sqlite:////var/lib/bacula/bacula.db'
-        '';
-	description = ''
-         Define SQL database connection to bacula catalog as specified in http://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
-        '';
-      };
-
-      director_name = mkOption {
-        description = ''
-          Name of the Director to connect with bconsole.
-        '';
-      };
-
-      director_password = mkOption {
-        description = ''
-          Password for Director to connect with bconsole.
-        '';
-      };
-
-      director_port = mkOption {
-        default = 9101;
-        type = types.int;
-        description = ''
-          Port for Director to connect with bconsole.
-        '';
-      };
-
-      director_address = mkOption {
-        default = "127.0.0.1";
-        description = ''
-          IP/Hostname for Director to connect with bconsole.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    systemd.services.almir = {
-      after = [ "network.target" "postgresql.service" ];
-      description = "Almir web app";
-      wantedBy = [ "multi-user.target" ];
-      path = [ pkgs.pythonPackages.almir ];
-      environment.PYTHONPATH = "${pkgs.pythonPackages.almir}/lib/${pkgs.pythonPackages.python.libPrefix}/site-packages";
-      serviceConfig.ExecStart = "${pkgs.pythonPackages.pyramid}/bin/pserve ${productionini}";
-    };
-
-    environment.systemPackages = [ pkgs.pythonPackages.almir ];
-
-    users.extraUsers.almir = {
-      group = "almir";
-      uid = config.ids.uids.almir;
-      createHome = true;
-      shell = "${pkgs.bash}/bin/bash";
-    };
-
-    users.extraGroups.almir.gid = config.ids.gids.almir;
-  };
-}
diff --git a/nixos/modules/services/backup/borgbackup.nix b/nixos/modules/services/backup/borgbackup.nix
new file mode 100644
index 000000000000..1b730e0c2b76
--- /dev/null
+++ b/nixos/modules/services/backup/borgbackup.nix
@@ -0,0 +1,580 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  isLocalPath = x:
+    builtins.substring 0 1 x == "/"      # absolute path
+    || builtins.substring 0 1 x == "."   # relative path
+    || builtins.match "[.*:.*]" == null; # not machine:path
+ 
+  mkExcludeFile = cfg:
+    # Write each exclude pattern to a new line
+    pkgs.writeText "excludefile" (concatStringsSep "\n" cfg.exclude);
+
+  mkKeepArgs = cfg:
+    # If cfg.prune.keep e.g. has a yearly attribute,
+    # its content is passed on as --keep-yearly
+    concatStringsSep " "
+      (mapAttrsToList (x: y: "--keep-${x}=${toString y}") cfg.prune.keep);
+
+  mkBackupScript = cfg: ''
+    on_exit()
+    {
+      exitStatus=$?
+      # Reset the EXIT handler, or else we're called again on 'exit' below
+      trap - EXIT
+      ${cfg.postHook}
+      exit $exitStatus
+    }
+    trap 'on_exit' INT TERM QUIT EXIT
+
+    archiveName="${cfg.archiveBaseName}-$(date ${cfg.dateFormat})"
+    archiveSuffix="${optionalString cfg.appendFailedSuffix ".failed"}"
+    ${cfg.preHook}
+  '' + optionalString cfg.doInit ''
+    # Run borg init if the repo doesn't exist yet
+    if ! borg list > /dev/null; then
+      borg init \
+        --encryption ${cfg.encryption.mode} \
+        $extraInitArgs
+      ${cfg.postInit}
+    fi
+  '' + ''
+    borg create \
+      --compression ${cfg.compression} \
+      --exclude-from ${mkExcludeFile cfg} \
+      $extraCreateArgs \
+      "::$archiveName$archiveSuffix" \
+      ${escapeShellArgs cfg.paths}
+  '' + optionalString cfg.appendFailedSuffix ''
+    borg rename "::$archiveName$archiveSuffix" "$archiveName"
+  '' + ''
+    ${cfg.postCreate}
+  '' + optionalString (cfg.prune.keep != { }) ''
+    borg prune \
+      ${mkKeepArgs cfg} \
+      --prefix ${escapeShellArg cfg.prune.prefix} \
+      $extraPruneArgs
+    ${cfg.postPrune}
+  '';
+
+  mkPassEnv = cfg: with cfg.encryption;
+    if passCommand != null then
+      { BORG_PASSCOMMAND = passCommand; }
+    else if passphrase != null then
+      { BORG_PASSPHRASE = passphrase; }
+    else { };
+
+  mkBackupService = name: cfg: 
+    let
+      userHome = config.users.users.${cfg.user}.home;
+    in nameValuePair "borgbackup-job-${name}" {
+      description = "BorgBackup job ${name}";
+      path = with pkgs; [
+        borgbackup openssh
+      ];
+      script = mkBackupScript cfg;
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        # Only run when no other process is using CPU or disk
+        CPUSchedulingPolicy = "idle";
+        IOSchedulingClass = "idle";
+        ProtectSystem = "strict";
+        ReadWritePaths =
+          [ "${userHome}/.config/borg" "${userHome}/.cache/borg" ]
+          # Borg needs write access to repo if it is not remote
+          ++ optional (isLocalPath cfg.repo) cfg.repo;
+        PrivateTmp = true;
+      };
+      environment = {
+        BORG_REPO = cfg.repo;
+        inherit (cfg) extraInitArgs extraCreateArgs extraPruneArgs;
+      } // (mkPassEnv cfg) // cfg.environment;
+      inherit (cfg) startAt;
+    };
+
+  # Paths listed in ReadWritePaths must exist before service is started
+  mkActivationScript = name: cfg:
+    let
+      install = "install -o ${cfg.user} -g ${cfg.group}";
+    in
+      nameValuePair "borgbackup-job-${name}" (stringAfter [ "users" ] (''
+        # Ensure that the home directory already exists
+        # We can't assert createHome == true because that's not the case for root
+        cd "${config.users.users.${cfg.user}.home}"
+        ${install} -d .config/borg
+        ${install} -d .cache/borg
+      '' + optionalString (isLocalPath cfg.repo) ''
+        ${install} -d ${escapeShellArg cfg.repo}
+      ''));
+
+  mkPassAssertion = name: cfg: {
+    assertion = with cfg.encryption;
+      mode != "none" -> passCommand != null || passphrase != null;
+    message =
+      "passCommand or passphrase has to be specified because"
+      + '' borgbackup.jobs.${name}.encryption != "none"'';
+  };
+
+  mkRepoService = name: cfg:
+    nameValuePair "borgbackup-repo-${name}" {
+      description = "Create BorgBackup repository ${name} directory";
+      script = ''
+        mkdir -p ${escapeShellArg cfg.path}
+        chown ${cfg.user}:${cfg.group} ${escapeShellArg cfg.path}
+      '';
+      serviceConfig = {
+        # The service's only task is to ensure that the specified path exists
+        Type = "oneshot";
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+
+  mkAuthorizedKey = cfg: appendOnly: key:
+    let
+      # Because of the following line, clients do not need to specify an absolute repo path
+      cdCommand = "cd ${escapeShellArg cfg.path}";
+      restrictedArg = "--restrict-to-${if cfg.allowSubRepos then "path" else "repository"} .";
+      appendOnlyArg = optionalString appendOnly "--append-only";
+      quotaArg = optionalString (cfg.quota != null) "--storage-quota ${cfg.quota}";
+      serveCommand = "borg serve ${restrictedArg} ${appendOnlyArg} ${quotaArg}";
+    in
+      ''command="${cdCommand} && ${serveCommand}",restrict ${key}'';
+
+  mkUsersConfig = name: cfg: {
+    users.${cfg.user} = {
+      openssh.authorizedKeys.keys =
+        (map (mkAuthorizedKey cfg false) cfg.authorizedKeys
+        ++ map (mkAuthorizedKey cfg true) cfg.authorizedKeysAppendOnly);
+      useDefaultShell = true;
+    };
+    groups.${cfg.group} = { };
+  };
+
+  mkKeysAssertion = name: cfg: {
+    assertion = cfg.authorizedKeys != [ ] || cfg.authorizedKeysAppendOnly != [ ];
+    message =
+      "borgbackup.repos.${name} does not make sense"
+      + " without at least one public key";
+  };
+
+in {
+  meta.maintainers = with maintainers; [ dotlambda ];
+
+  ###### interface
+
+  options.services.borgbackup.jobs = mkOption {
+    description = "Deduplicating backups using BorgBackup.";
+    default = { };
+    example = literalExample ''
+      {
+        rootBackup = {
+          paths = "/";
+          exclude = [ "/nix" ];
+          repo = "/path/to/local/repo";
+          encryption = {
+            mode = "repokey";
+            passphrase = "secret";
+          };
+          compression = "auto,lzma";
+          startAt = "weekly";
+        };
+      }
+    '';
+    type = types.attrsOf (types.submodule (let globalConfig = config; in
+      { name, config, ... }: {
+        options = {
+
+          paths = mkOption {
+            type = with types; either path (nonEmptyListOf path);
+            description = "Path(s) to back up.";
+            example = "/home/user";
+            apply = x: if isList x then x else [ x ];
+          };
+
+          repo = mkOption {
+            type = types.str;
+            description = "Remote or local repository to back up to.";
+            example = "user@machine:/path/to/repo";
+          };
+
+          archiveBaseName = mkOption {
+            type = types.strMatching "[^/{}]+";
+            default = "${globalConfig.networking.hostName}-${name}";
+            defaultText = "\${config.networking.hostName}-<name>";
+            description = ''
+              How to name the created archives. A timestamp, whose format is
+              determined by <option>dateFormat</option>, will be appended. The full
+              name can be modified at runtime (<literal>$archiveName</literal>).
+              Placeholders like <literal>{hostname}</literal> must not be used.
+            '';
+          };
+
+          dateFormat = mkOption {
+            type = types.str;
+            description = ''
+              Arguments passed to <command>date</command>
+              to create a timestamp suffix for the archive name.
+            '';
+            default = "+%Y-%m-%dT%H:%M:%S";
+            example = "-u +%s";
+          };
+
+          startAt = mkOption {
+            type = with types; either str (listOf str);
+            default = "daily";
+            description = ''
+              When or how often the backup should run.
+              Must be in the format described in
+              <citerefentry><refentrytitle>systemd.time</refentrytitle>
+              <manvolnum>7</manvolnum></citerefentry>.
+              If you do not want the backup to start
+              automatically, use <literal>[ ]</literal>.
+            '';
+          };
+
+          user = mkOption {
+            type = types.str;
+            description = ''
+              The user <command>borg</command> is run as.
+              User or group need read permission
+              for the specified <option>paths</option>.
+            '';
+            default = "root";
+          };
+
+          group = mkOption {
+            type = types.str;
+            description = ''
+              The group borg is run as. User or group needs read permission
+              for the specified <option>paths</option>.
+            '';
+            default = "root";
+          };
+
+          encryption.mode = mkOption {
+            type = types.enum [
+              "repokey" "keyfile"
+              "repokey-blake2" "keyfile-blake2"
+              "authenticated" "authenticated-blake2"
+              "none"
+            ];
+            description = ''
+              Encryption mode to use. Setting a mode
+              other than <literal>"none"</literal> requires
+              you to specify a <option>passCommand</option>
+              or a <option>passphrase</option>.
+            '';
+          };
+
+          encryption.passCommand = mkOption {
+            type = with types; nullOr str;
+            description = ''
+              A command which prints the passphrase to stdout.
+              Mutually exclusive with <option>passphrase</option>.
+            '';
+            default = null;
+            example = "cat /path/to/passphrase_file";
+          };
+
+          encryption.passphrase = mkOption {
+            type = with types; nullOr str;
+            description = ''
+              The passphrase the backups are encrypted with.
+              Mutually exclusive with <option>passCommand</option>.
+              If you do not want the passphrase to be stored in the
+              world-readable Nix store, use <option>passCommand</option>.
+            '';
+            default = null;
+          };
+
+          compression = mkOption {
+            # "auto" is optional,
+            # compression mode must be given,
+            # compression level is optional
+            type = types.strMatching "none|(auto,)?(lz4|zstd|zlib|lzma)(,[[:digit:]]{1,2})?";
+            description = ''
+              Compression method to use. Refer to
+              <command>borg help compression</command>
+              for all available options.
+            '';
+            default = "lz4";
+            example = "auto,lzma";
+          };
+
+          exclude = mkOption {
+            type = with types; listOf str;
+            description = ''
+              Exclude paths matching any of the given patterns. See
+              <command>borg help patterns</command> for pattern syntax.
+            '';
+            default = [ ];
+            example = [
+              "/home/*/.cache"
+              "/nix"
+            ];
+          };
+
+          doInit = mkOption {
+            type = types.bool;
+            description = ''
+              Run <command>borg init</command> if the
+              specified <option>repo</option> does not exist.
+              You should set this to <literal>false</literal>
+              if the repository is located on an external drive
+              that might not always be mounted.
+            '';
+            default = true;
+          };
+
+          appendFailedSuffix = mkOption {
+            type = types.bool;
+            description = ''
+              Append a <literal>.failed</literal> suffix
+              to the archive name, which is only removed if
+              <command>borg create</command> has a zero exit status.
+            '';
+            default = true;
+          };
+
+          prune.keep = mkOption {
+            # Specifying e.g. `prune.keep.yearly = -1`
+            # means there is no limit of yearly archives to keep
+            # The regex is for use with e.g. --keep-within 1y
+            type = with types; attrsOf (either int (strMatching "[[:digit:]]+[Hdwmy]"));
+            description = ''
+              Prune a repository by deleting all archives not matching any of the
+              specified retention options. See <command>borg help prune</command>
+              for the available options.
+            '';
+            default = { };
+            example = literalExample ''
+              {
+                within = "1d"; # Keep all archives from the last day
+                daily = 7;
+                weekly = 4;
+                monthly = -1;  # Keep at least one archive for each month
+              }
+            '';
+          };
+
+          prune.prefix = mkOption {
+            type = types.str;
+            description = ''
+              Only consider archive names starting with this prefix for pruning.
+              By default, only archives created by this job are considered.
+              Use <literal>""</literal> to consider all archives.
+            '';
+            default = config.archiveBaseName;
+            defaultText = "\${archiveBaseName}";
+          };
+
+          environment = mkOption {
+            type = with types; attrsOf str;
+            description = ''
+              Environment variables passed to the backup script.
+              You can for example specify which SSH key to use.
+            '';
+            default = { };
+            example = { BORG_RSH = "ssh -i /path/to/key"; };
+          };
+
+          preHook = mkOption {
+            type = types.lines;
+            description = ''
+              Shell commands to run before the backup.
+              This can for example be used to mount file systems.
+            '';
+            default = "";
+            example = ''
+              # To add excluded paths at runtime
+              extraCreateArgs="$extraCreateArgs --exclude /some/path"
+            '';
+          };
+
+          postInit = mkOption {
+            type = types.lines;
+            description = ''
+              Shell commands to run after <command>borg init</command>.
+            '';
+            default = "";
+          };
+
+          postCreate = mkOption {
+            type = types.lines;
+            description = ''
+              Shell commands to run after <command>borg create</command>. The name
+              of the created archive is stored in <literal>$archiveName</literal>.
+            '';
+            default = "";
+          };
+
+          postPrune = mkOption {
+            type = types.lines;
+            description = ''
+              Shell commands to run after <command>borg prune</command>.
+            '';
+            default = "";
+          };
+
+          postHook = mkOption {
+            type = types.lines;
+            description = ''
+              Shell commands to run just before exit. They are executed
+              even if a previous command exits with a non-zero exit code.
+              The latter is available as <literal>$exitStatus</literal>.
+            '';
+            default = "";
+          };
+
+          extraInitArgs = mkOption {
+            type = types.str;
+            description = ''
+              Additional arguments for <command>borg init</command>.
+              Can also be set at runtime using <literal>$extraInitArgs</literal>.
+            '';
+            default = "";
+            example = "--append-only";
+          };
+
+          extraCreateArgs = mkOption {
+            type = types.str;
+            description = ''
+              Additional arguments for <command>borg create</command>.
+              Can also be set at runtime using <literal>$extraCreateArgs</literal>.
+            '';
+            default = "";
+            example = "--stats --checkpoint-interval 600";
+          };
+
+          extraPruneArgs = mkOption {
+            type = types.str;
+            description = ''
+              Additional arguments for <command>borg prune</command>.
+              Can also be set at runtime using <literal>$extraPruneArgs</literal>.
+            '';
+            default = "";
+            example = "--save-space";
+          };
+
+        };
+      }
+    ));
+  };
+
+  options.services.borgbackup.repos = mkOption {
+    description = ''
+      Serve BorgBackup repositories to given public SSH keys,
+      restricting their access to the repository only.
+      Also, clients do not need to specify the absolute path when accessing the repository,
+      i.e. <literal>user@machine:.</literal> is enough. (Note colon and dot.)
+    '';
+    default = { };
+    type = types.attrsOf (types.submodule (
+      { name, config, ... }: {
+        options = {
+          
+          path = mkOption {
+            type = types.path;
+            description = ''
+              Where to store the backups. Note that the directory
+              is created automatically, with correct permissions.
+            '';
+            default = "/var/lib/borgbackup";
+          };
+
+          user = mkOption {
+            type = types.str;
+            description = ''
+              The user <command>borg serve</command> is run as.
+              User or group needs write permission
+              for the specified <option>path</option>.
+            '';
+            default = "borg";
+          };
+
+          group = mkOption {
+            type = types.str;
+            description = ''
+              The group <command>borg serve</command> is run as.
+              User or group needs write permission
+              for the specified <option>path</option>.
+            '';
+            default = "borg";
+          };
+
+          authorizedKeys = mkOption {
+            type = with types; listOf str;
+            description = ''
+              Public SSH keys that are given full write access to this repository.
+              You should use a different SSH key for each repository you write to, because
+              the specified keys are restricted to running <command>borg serve</command>
+              and can only access this single repository.
+            '';
+            default = [ ];
+          };
+
+          authorizedKeysAppendOnly = mkOption {
+            type = with types; listOf str;
+            description = ''
+              Public SSH keys that can only be used to append new data (archives) to the repository.
+              Note that archives can still be marked as deleted and are subsequently removed from disk
+              upon accessing the repo with full write access, e.g. when pruning.
+            '';
+            default = [ ];
+          };
+
+          allowSubRepos = mkOption {
+            type = types.bool;
+            description = ''
+              Allow clients to create repositories in subdirectories of the
+              specified <option>path</option>. These can be accessed using
+              <literal>user@machine:path/to/subrepo</literal>. Note that a
+              <option>quota</option> applies to repositories independently.
+              Therefore, if this is enabled, clients can create multiple
+              repositories and upload an arbitrary amount of data.
+            '';
+            default = false;
+          };
+
+          quota = mkOption {
+            # See the definition of parse_file_size() in src/borg/helpers/parseformat.py
+            type = with types; nullOr (strMatching "[[:digit:].]+[KMGTP]?");
+            description = ''
+              Storage quota for the repository. This quota is ensured for all
+              sub-repositories if <option>allowSubRepos</option> is enabled
+              but not for the overall storage space used.
+            '';
+            default = null;
+            example = "100G";
+          };
+
+        };
+      }
+    ));
+  };
+
+  ###### implementation
+
+  config = mkIf (with config.services.borgbackup; jobs != { } || repos != { })
+    (with config.services.borgbackup; {
+      assertions =
+        mapAttrsToList mkPassAssertion jobs
+        ++ mapAttrsToList mkKeysAssertion repos;
+
+      system.activationScripts = mapAttrs' mkActivationScript jobs;
+
+      systemd.services =
+        # A job named "foo" is mapped to systemd.services.borgbackup-job-foo
+        mapAttrs' mkBackupService jobs
+        # A repo named "foo" is mapped to systemd.services.borgbackup-repo-foo
+        // mapAttrs' mkRepoService repos;
+
+      users = mkMerge (mapAttrsToList mkUsersConfig repos);
+
+      environment.systemPackages = with pkgs; [ borgbackup ];
+    });
+}
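
As a usage sketch based on the options defined above (host names, paths and the SSH key are placeholders), a client job and a serving repository could be combined like this:

    # Client: back up /home to a repository served over SSH
    services.borgbackup.jobs.home = {
      paths = "/home";
      repo = "borg@backupserver:.";   # "." resolves to the repo path on the server
      encryption = {
        mode = "repokey";
        passCommand = "cat /root/borg-passphrase";
      };
      compression = "auto,lzma";
      startAt = "daily";
    };

    # Server: expose a repository to the client's key, restricted to borg serve
    services.borgbackup.repos.home = {
      path = "/var/lib/borgbackup/home";
      authorizedKeys = [ "ssh-ed25519 AAAA... root@client" ];
      quota = "100G";
    };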
diff --git a/nixos/modules/services/backup/crashplan-small-business.nix b/nixos/modules/services/backup/crashplan-small-business.nix
new file mode 100644
index 000000000000..9497d8c18bb7
--- /dev/null
+++ b/nixos/modules/services/backup/crashplan-small-business.nix
@@ -0,0 +1,74 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.crashplansb;
+  crashplansb = pkgs.crashplansb.override { maxRam = cfg.maxRam; };
+  varDir = "/var/lib/crashplan";
+in
+
+with lib;
+
+{
+  options = {
+    services.crashplansb = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Whether to start the CrashPlan for Small Business background service.
+        '';
+      };
+      maxRam = mkOption {
+        default = "1024m";
+        example = "2G";
+        type = types.str;
+        description = ''
+          Maximum amount of ram that the crashplan engine should use.
+        '';
+      };
+      openPorts = mkOption {
+        description = "Open ports in the firewall for crashplan.";
+        default = true;
+        type = types.bool;
+      };
+      ports =  mkOption {
+        # https://support.code42.com/Administrator/6/Planning_and_installing/TCP_and_UDP_ports_used_by_the_Code42_platform
+        # used ports can also be checked in the desktop app console using the command connection.info
+        description = "which ports to open.";
+        default = [ 4242 4243 4244 4247 ];
+        type = types.listOf types.int;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ crashplansb ];
+    networking.firewall.allowedTCPPorts = mkIf cfg.openPorts cfg.ports;
+
+    systemd.services.crashplansb = {
+      description = "CrashPlan Backup Engine";
+
+      wantedBy = [ "multi-user.target" ];
+      after    = [ "network.target" "local-fs.target" ];
+
+      preStart = ''
+        install -d -m 755 ${crashplansb.vardir}
+        install -d -m 700 ${crashplansb.vardir}/conf
+        install -d -m 700 ${crashplansb.manifestdir}
+        install -d -m 700 ${crashplansb.vardir}/cache
+        install -d -m 700 ${crashplansb.vardir}/backupArchives
+        install -d -m 777 ${crashplansb.vardir}/log
+        cp -avn ${crashplansb}/conf.template/* ${crashplansb.vardir}/conf
+      '';
+
+      serviceConfig = {
+        Type = "forking";
+        EnvironmentFile = "${crashplansb}/bin/run.conf";
+        ExecStart = "${crashplansb}/bin/CrashPlanEngine start";
+        ExecStop = "${crashplansb}/bin/CrashPlanEngine stop";
+        PIDFile = "${crashplansb.vardir}/CrashPlanEngine.pid";
+        WorkingDirectory = crashplansb;
+      };
+    };
+  };
+}
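
A minimal usage sketch for the options above (the RAM value is illustrative):

    services.crashplansb = {
      enable = true;
      maxRam = "2G";
      openPorts = true;  # opens the default TCP ports listed in the ports option
    };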
diff --git a/nixos/modules/services/backup/duplicati.nix b/nixos/modules/services/backup/duplicati.nix
new file mode 100644
index 000000000000..9772ca4d20a7
--- /dev/null
+++ b/nixos/modules/services/backup/duplicati.nix
@@ -0,0 +1,40 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.duplicati;
+in
+{
+  options = {
+    services.duplicati = {
+      enable = mkEnableOption "Duplicati";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.duplicati ];
+
+    systemd.services.duplicati = {
+      description = "Duplicati backup";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "duplicati";
+        Group = "duplicati";
+        ExecStart = "${pkgs.duplicati}/bin/duplicati-server --webservice-interface=any --webservice-port=8200 --server-datafolder=/var/lib/duplicati";
+        Restart = "on-failure";
+      };
+    };
+
+    users.extraUsers.duplicati = {
+      uid = config.ids.uids.duplicati;
+      home = "/var/lib/duplicati";
+      createHome = true;
+      group = "duplicati";
+    };
+    users.extraGroups.duplicati.gid = config.ids.gids.duplicati;
+
+  };
+}
+
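
The module above only exposes an enable switch; a usage sketch:

    services.duplicati.enable = true;
    # The web interface then listens on port 8200, as hard-coded in ExecStart above.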
diff --git a/nixos/modules/services/backup/restic.nix b/nixos/modules/services/backup/restic.nix
new file mode 100644
index 000000000000..21d82469c605
--- /dev/null
+++ b/nixos/modules/services/backup/restic.nix
@@ -0,0 +1,150 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+{
+  options.services.restic.backups = mkOption {
+    description = ''
+      Periodic backups to create with Restic.
+    '';
+    type = types.attrsOf (types.submodule ({ name, config, ... }: {
+      options = {
+        passwordFile = mkOption {
+          type = types.str;
+          description = ''
+            Read the repository password from a file.
+          '';
+          example = "/etc/nixos/restic-password";
+
+        };
+
+        repository = mkOption {
+          type = types.str;
+          description = ''
+            Repository to back up to.
+          '';
+          example = "sftp:backup@192.168.1.100:/backups/${name}";
+        };
+
+        paths = mkOption {
+          type = types.listOf types.str;
+          default = [];
+          description = ''
+            Which paths to back up.
+          '';
+          example = [
+            "/var/lib/postgresql"
+            "/home/user/backup"
+          ];
+        };
+
+        timerConfig = mkOption {
+          type = types.attrsOf types.str;
+          default = {
+            OnCalendar = "daily";
+          };
+          description = ''
+            When to run the backup. See man systemd.timer for details.
+          '';
+          example = {
+            OnCalendar = "00:05";
+            RandomizedDelaySec = "5h";
+          };
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "root";
+          description = ''
+            The user the backup should run as.
+          '';
+          example = "postgresql";
+        };
+
+        extraBackupArgs = mkOption {
+          type = types.listOf types.str;
+          default = [];
+          description = ''
+            Extra arguments passed to restic backup.
+          '';
+          example = [
+            "--exclude-file=/etc/nixos/restic-ignore"
+          ];
+        };
+
+        extraOptions = mkOption {
+          type = types.listOf types.str;
+          default = [];
+          description = ''
+            Extra extended options to be passed to the restic --option flag.
+          '';
+          example = [
+            "sftp.command='ssh backup@192.168.1.100 -i /home/user/.ssh/id_rsa -s sftp'"
+          ];
+        };
+
+        initialize = mkOption {
+          type = types.bool;
+          default = false;
+          description = ''
+            Create the repository if it doesn't exist.
+          '';
+        };
+      };
+    }));
+    default = {};
+    example = {
+      localbackup = {
+        paths = [ "/home" ];
+        repository = "/mnt/backup-hdd";
+        passwordFile = "/etc/nixos/secrets/restic-password";
+        initialize = true;
+      };
+      remotebackup = {
+        paths = [ "/home" ];
+        repository = "sftp:backup@host:/backups/home";
+        passwordFile = "/etc/nixos/secrets/restic-password";
+        extraOptions = [
+          "sftp.command='ssh backup@host -i /etc/nixos/secrets/backup-private-key -s sftp'"
+        ];
+        timerConfig = {
+          OnCalendar = "00:05";
+          RandomizedDelaySec = "5h";
+        };
+      };
+    };
+  };
+
+  config = {
+    systemd.services =
+      mapAttrs' (name: backup:
+        let
+          extraOptions = concatMapStrings (arg: " -o ${arg}") backup.extraOptions;
+          connectTo = elemAt (splitString ":" backup.repository) 1;
+          resticCmd = "${pkgs.restic}/bin/restic${extraOptions}";
+        in nameValuePair "restic-backups-${name}" ({
+          environment = {
+            RESTIC_PASSWORD_FILE = backup.passwordFile;
+            RESTIC_REPOSITORY = backup.repository;
+          };
+          path = with pkgs; [
+            openssh
+          ];
+          restartIfChanged = false;
+          serviceConfig = {
+            Type = "oneshot";
+            ExecStart = "${resticCmd} backup ${concatStringsSep " " backup.extraBackupArgs} ${concatStringsSep " " backup.paths}";
+            User = backup.user;
+          };
+        } // optionalAttrs backup.initialize {
+          preStart = ''
+            ${resticCmd} snapshots || ${resticCmd} init
+          '';
+        })
+      ) config.services.restic.backups;
+    systemd.timers =
+      mapAttrs' (name: backup: nameValuePair "restic-backups-${name}" {
+        wantedBy = [ "timers.target" ];
+        timerConfig = backup.timerConfig;
+      }) config.services.restic.backups;
+  };
+}
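
Building on the option example above (file locations are placeholders), a single local backup with extra arguments and a custom schedule might be declared as:

    services.restic.backups.localbackup = {
      paths = [ "/home" ];
      repository = "/mnt/backup-hdd";
      passwordFile = "/etc/nixos/secrets/restic-password";
      initialize = true;
      extraBackupArgs = [ "--exclude-file=/etc/nixos/restic-ignore" ];
      timerConfig.OnCalendar = "daily";
    };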
diff --git a/nixos/modules/services/backup/tarsnap.nix b/nixos/modules/services/backup/tarsnap.nix
index 7c9dedb67ad2..4fc7c24813a5 100644
--- a/nixos/modules/services/backup/tarsnap.nix
+++ b/nixos/modules/services/backup/tarsnap.nix
@@ -115,7 +115,7 @@ in
                 description = ''
                   Print global archive statistics upon completion.
                   The output is available via
-                  <command>systemctl status tarsnap@archive-name</command>.
+                  <command>systemctl status tarsnap-archive-name</command>.
                 '';
               };
 
@@ -238,6 +238,20 @@ in
                   Whether to produce verbose logging output.
                 '';
               };
+              explicitSymlinks = mkOption {
+                type = types.bool;
+                default = false;
+                description = ''
+                  Whether to follow symlinks specified as archives.
+                '';
+              };
+              followSymlinks = mkOption {
+                type = types.bool;
+                default = false;
+                description = ''
+                  Whether to follow all symlinks in archive trees.
+                '';
+              };
             };
           }
         ));
@@ -285,12 +299,12 @@ in
         }) gcfg.archives);
 
     systemd.services =
-      mapAttrs' (name: cfg: nameValuePair "tarsnap-${name}" {
+      (mapAttrs' (name: cfg: nameValuePair "tarsnap-${name}" {
         description = "Tarsnap archive '${name}'";
         requires    = [ "network-online.target" ];
         after       = [ "network-online.target" ];
 
-        path = [ pkgs.iputils pkgs.tarsnap pkgs.utillinux ];
+        path = with pkgs; [ iputils tarsnap utillinux ];
 
         # In order for the persistent tarsnap timer to work reliably, we have to
         # make sure that the tarsnap server is reachable after systemd starts up
@@ -300,10 +314,12 @@ in
           while ! ping -q -c 1 v1-0-0-server.tarsnap.com &> /dev/null; do sleep 3; done
         '';
 
-        script =
-          let run = ''tarsnap --configfile "/etc/tarsnap/${name}.conf" \
-                        -c -f "${name}-$(date +"%Y%m%d%H%M%S")" \
+        script = let
+          tarsnap = ''tarsnap --configfile "/etc/tarsnap/${name}.conf"'';
+          run = ''${tarsnap} -c -f "${name}-$(date +"%Y%m%d%H%M%S")" \
                         ${optionalString cfg.verbose "-v"} \
+                        ${optionalString cfg.explicitSymlinks "-H"} \
+                        ${optionalString cfg.followSymlinks "-L"} \
                         ${concatStringsSep " " cfg.directories}'';
           in if (cfg.cachedir != null) then ''
             mkdir -p ${cfg.cachedir}
@@ -313,7 +329,7 @@ in
               if [ ! -e ${cfg.cachedir}/firstrun ]; then
                 ( flock 10
                   flock -u 9
-                  tarsnap --configfile "/etc/tarsnap/${name}.conf" --fsck
+                  ${tarsnap} --fsck
                   flock 9
                 ) 10>${cfg.cachedir}/firstrun
               fi
@@ -329,7 +345,44 @@ in
           CapabilityBoundingSet = [ "CAP_DAC_READ_SEARCH" ];
           PermissionsStartOnly = "true";
         };
-      }) gcfg.archives;
+      }) gcfg.archives) //
+
+      (mapAttrs' (name: cfg: nameValuePair "tarsnap-restore-${name}"{
+        description = "Tarsnap restore '${name}'";
+        requires    = [ "network-online.target" ];
+
+        path = with pkgs; [ iputils tarsnap utillinux ];
+
+        script = let
+          tarsnap = ''tarsnap --configfile "/etc/tarsnap/${name}.conf"'';
+          lastArchive = ''$(${tarsnap} --list-archives | sort | tail -1)'';
+          run = ''${tarsnap} -x -f "${lastArchive}" ${optionalString cfg.verbose "-v"}'';
+
+        in if (cfg.cachedir != null) then ''
+          mkdir -p ${cfg.cachedir}
+          chmod 0700 ${cfg.cachedir}
+
+          ( flock 9
+            if [ ! -e ${cfg.cachedir}/firstrun ]; then
+              ( flock 10
+                flock -u 9
+                ${tarsnap} --fsck
+                flock 9
+              ) 10>${cfg.cachedir}/firstrun
+            fi
+          ) 9>${cfg.cachedir}/lockf
+
+           exec flock ${cfg.cachedir}/firstrun ${run}
+        '' else "exec ${run}";
+
+        serviceConfig = {
+          Type = "oneshot";
+          IOSchedulingClass = "idle";
+          NoNewPrivileges = "true";
+          CapabilityBoundingSet = [ "CAP_DAC_READ_SEARCH" ];
+          PermissionsStartOnly = "true";
+        };
+      }) gcfg.archives);
 
     # Note: the timer must be Persistent=true, so that systemd will start it even
     # if e.g. your laptop was asleep while the latest interval occurred.
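
With the two symlink options added above, an archive definition could look like the following sketch (the services.tarsnap.archives attribute path is assumed from the surrounding module, which this hunk does not show):

    services.tarsnap.archives.nixos = {
      directories = [ "/home" "/etc" ];
      explicitSymlinks = true;   # adds -H: follow symlinks named on the command line
      followSymlinks = false;    # would add -L: follow all symlinks in archive trees
      verbose = true;            # adds -v
    };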
diff --git a/nixos/modules/services/backup/znapzend.nix b/nixos/modules/services/backup/znapzend.nix
index baf99930e3eb..3d133f82d204 100644
--- a/nixos/modules/services/backup/znapzend.nix
+++ b/nixos/modules/services/backup/znapzend.nix
@@ -1,39 +1,372 @@
 { config, lib, pkgs, ... }:
 
 with lib;
+with types;
 
 let
+
+  # Converts a plan like
+  #   { "1d" = "1h"; "1w" = "1d"; }
+  # into
+  #   "1d=>1h,1w=>1d"
+  attrToPlan = attrs: concatStringsSep "," (builtins.attrValues (
+    mapAttrs (n: v: "${n}=>${v}") attrs));
+
+  planDescription = ''
+      The znapzend backup plan to use for the source.
+    </para>
+    <para>
+      The plan specifies how often to backup and for how long to keep the
+      backups. It consists of a series of retention-period-to-interval
+      associations:
+    </para>
+    <para>
+      <literal>
+        retA=>intA,retB=>intB,...
+      </literal>
+    </para>
+    <para>
+    Both intervals and retention periods are expressed in standard units
+    of time or multiples of them. You can use both the full name or a
+    shortcut according to the following listing:
+    </para>
+    <para>
+      <literal>
+        second|sec|s, minute|min, hour|h, day|d, week|w, month|mon|m, year|y
+      </literal>
+    </para>
+    <para>
+      See <citerefentry><refentrytitle>znapzendzetup</refentrytitle><manvolnum>1</manvolnum></citerefentry> for more info.
+  '';
+  planExample = "1h=>10min,1d=>1h,1w=>1d,1m=>1w,1y=>1m";
+
+  # A type for a string of the form number{b|k|M|G}
+  mbufferSizeType = str // {
+    check = x: str.check x && builtins.isList (builtins.match "^[0-9]+[bkMG]$" x);
+    description = "string of the form number{b|k|M|G}";
+  };
+
+  # Type for a string that must contain certain other strings (the list parameter).
+  # Note that these would need regex escaping.
+  stringContainingStrings = list: let
+    matching = s: map (str: builtins.match ".*${str}.*" s) list;
+  in str // {
+    check = x: str.check x && all isList (matching x);
+    description = "string containing all of the characters ${concatStringsSep ", " list}";
+  };
+
+  timestampType = stringContainingStrings [ "%Y" "%m" "%d" "%H" "%M" "%S" ];
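+  # e.g. the default "%Y-%m-%d-%H%M%S" passes this check, while a format such
+  # as "%Y-%m-%d" is rejected because %H, %M and %S are missing.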
+
+  destType = srcConfig: submodule ({ name, ... }: {
+    options = {
+
+      label = mkOption {
+        type = str;
+        description = "Label for this destination. Defaults to the attribute name.";
+      };
+
+      plan = mkOption {
+        type = str;
+        description = planDescription;
+        example = planExample;
+      };
+
+      dataset = mkOption {
+        type = str;
+        description = "Dataset name to send snapshots to.";
+        example = "tank/main";
+      };
+
+      host = mkOption {
+        type = nullOr str;
+        description = ''
+          Host to use for the destination dataset. Can be prefixed with
+          <literal>user@</literal> to specify the ssh user.
+        '';
+        default = null;
+        example = "john@example.com";
+      };
+
+      presend = mkOption {
+        type = nullOr str;
+        description = ''
+          Command to run before sending the snapshot to the destination.
+          Intended to run a remote script via <command>ssh</command> on the
+          destination, e.g. to bring up a backup disk or server or to put a
+          zpool online/offline. See also <option>postsend</option>.
+        '';
+        default = null;
+        example = "ssh root@bserv zpool import -Nf tank";
+      };
+
+      postsend = mkOption {
+        type = nullOr str;
+        description = ''
+          Command to run after sending the snapshot to the destination.
+          Intended to run a remote script via <command>ssh</command> on the
+          destination, e.g. to bring up a backup disk or server or to put a
+          zpool online/offline. See also <option>presend</option>.
+        '';
+        default = null;
+        example = "ssh root@bserv zpool export tank";
+      };
+    };
+
+    config = {
+      label = mkDefault name;
+      plan = mkDefault srcConfig.plan;
+    };
+  });
+
+
+
+  srcType = submodule ({ name, config, ... }: {
+    options = {
+
+      enable = mkOption {
+        type = bool;
+        description = "Whether to enable this source.";
+        default = true;
+      };
+
+      recursive = mkOption {
+        type = bool;
+        description = "Whether to do recursive snapshots.";
+        default = false;
+      };
+
+      mbuffer = {
+        enable = mkOption {
+          type = bool;
+          description = "Whether to use <command>mbuffer</command>.";
+          default = false;
+        };
+
+        port = mkOption {
+          type = nullOr ints.u16;
+          description = ''
+              Port to use for <command>mbuffer</command>.
+            </para>
+            <para>
+              If this is null, it will run <command>mbuffer</command> through
+              ssh.
+            </para>
+            <para>
+              If this is not null, it will run <command>mbuffer</command>
+              directly through TCP, which is not encrypted but faster. In that
+              case the given port needs to be open on the destination host.
+          '';
+          default = null;
+        };
+
+        size = mkOption {
+          type = mbufferSizeType;
+          description = ''
+            The size for <command>mbuffer</command>.
+            Supports the units b, k, M, G.
+          '';
+          default = "1G";
+          example = "128M";
+        };
+      };
+
+      presnap = mkOption {
+        type = nullOr str;
+        description = ''
+          Command to run before snapshots are taken on the source dataset,
+          e.g. for database locking/flushing. See also
+          <option>postsnap</option>.
+        '';
+        default = null;
+        example = literalExample ''
+          ''${pkgs.mariadb}/bin/mysql -e "set autocommit=0;flush tables with read lock;\\! ''${pkgs.coreutils}/bin/sleep 600" &  ''${pkgs.coreutils}/bin/echo $! > /tmp/mariadblock.pid ; sleep 10
+        '';
+      };
+
+      postsnap = mkOption {
+        type = nullOr str;
+        description = ''
+          Command to run after snapshots are taken on the source dataset,
+          e.g. for database unlocking. See also <option>presnap</option>.
+        '';
+        default = null;
+        example = literalExample ''
+          ''${pkgs.coreutils}/bin/kill `''${pkgs.coreutils}/bin/cat /tmp/mariadblock.pid`;''${pkgs.coreutils}/bin/rm /tmp/mariadblock.pid
+        '';
+      };
+
+      timestampFormat = mkOption {
+        type = timestampType;
+        description = ''
+          The timestamp format to use for constructing snapshot names.
+          The syntax is <literal>strftime</literal>-like. The string must
+          contain the mandatory <literal>%Y %m %d %H %M %S</literal>
+          conversions. Additionally, the characters <literal>- _ . :</literal>
+          and any alphanumeric character are allowed. If the format is
+          suffixed with a <literal>Z</literal>, times are in UTC.
+        '';
+        default = "%Y-%m-%d-%H%M%S";
+        example = "znapzend-%m.%d.%Y-%H%M%SZ";
+      };
+
+      sendDelay = mkOption {
+        type = int;
+        description = ''
+          Specify a delay (in seconds) before sending snapshots to the
+          destination. May be useful if you want to control when sending
+          happens.
+        '';
+        default = 0;
+        example = 60;
+      };
+
+      plan = mkOption {
+        type = str;
+        description = planDescription;
+        example = planExample;
+      };
+
+      dataset = mkOption {
+        type = str;
+        description = "The dataset to use for this source.";
+        example = "tank/home";
+      };
+
+      destinations = mkOption {
+        type = loaOf (destType config);
+        description = "Additional destinations.";
+        default = {};
+        example = literalExample ''
+          {
+            local = {
+              dataset = "btank/backup";
+              presend = "zpool import -N btank";
+              postsend = "zpool export btank";
+            };
+            remote = {
+              host = "john@example.com";
+              dataset = "tank/john";
+            };
+          };
+        '';
+      };
+    };
+
+    config = {
+      dataset = mkDefault name;
+    };
+
+  });
+
+  ### Generating the configuration from here
+
   cfg = config.services.znapzend;
+
+  onOff = b: if b then "on" else "off";
+  nullOff = b: if isNull b then "off" else toString b;
+  stripSlashes = replaceStrings [ "/" ] [ "." ];
+
+  attrsToFile = config: concatStringsSep "\n" (builtins.attrValues (
+    mapAttrs (n: v: "${n}=${v}") config));
+
+  mkDestAttrs = dst: with dst;
+    mapAttrs' (n: v: nameValuePair "dst_${label}${n}" v) ({
+      "" = optionalString (! isNull host) "${host}:" + dataset;
+      _plan = plan;
+    } // optionalAttrs (presend != null) {
+      _precmd = presend;
+    } // optionalAttrs (postsend != null) {
+      _pstcmd = postsend;
+    });
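+  # For a destination labelled "remote" this produces attribute names such as
+  # dst_remote (the target dataset, optionally prefixed with the host),
+  # dst_remote_plan and, when set, dst_remote_precmd / dst_remote_pstcmd;
+  # these end up verbatim in the generated zetup file below.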
+
+  mkSrcAttrs = srcCfg: with srcCfg; {
+    enabled = onOff enable;
+    mbuffer = with mbuffer; if enable then "${pkgs.mbuffer}/bin/mbuffer"
+        + optionalString (port != null) ":${toString port}" else "off";
+    mbuffer_size = mbuffer.size;
+    post_znap_cmd = nullOff postsnap;
+    pre_znap_cmd = nullOff presnap;
+    recursive = onOff recursive;
+    src = dataset;
+    src_plan = plan;
+    tsformat = timestampFormat;
+    zend_delay = toString sendDelay;
+  } // fold (a: b: a // b) {} (
+    map mkDestAttrs (builtins.attrValues destinations)
+  );
+
+  files = mapAttrs' (n: srcCfg: let
+    fileText = attrsToFile (mkSrcAttrs srcCfg);
+  in {
+    name = srcCfg.dataset;
+    value = pkgs.writeText (stripSlashes srcCfg.dataset) fileText;
+  }) cfg.zetup;
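+  # Sketch of a generated zetup file for a source "tank/home" with a single
+  # destination labelled "remote"; only a subset of the keys is shown and the
+  # actual values depend on the configuration:
+  #   enabled=on
+  #   src=tank/home
+  #   src_plan=1d=>1h,1m=>1d
+  #   tsformat=%Y-%m-%d-%H%M%S
+  #   dst_remote=john@example.com:rtank/john
+  #   dst_remote_plan=1d=>1h,1m=>1d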
+
 in
 {
   options = {
     services.znapzend = {
-      enable = mkEnableOption "ZnapZend daemon";
+      enable = mkEnableOption "ZnapZend ZFS backup daemon";
 
       logLevel = mkOption {
         default = "debug";
         example = "warning";
-        type = lib.types.enum ["debug" "info" "warning" "err" "alert"];
-        description = "The log level when logging to file. Any of debug, info, warning, err, alert. Default in daemonized form is debug.";
+        type = enum ["debug" "info" "warning" "err" "alert"];
+        description = ''
+          The log level when logging to file. Any of debug, info, warning, err,
+          alert. Default in daemonized form is debug.
+        '';
       };
 
       logTo = mkOption {
-        type = types.str;
+        type = str;
         default = "syslog::daemon";
         example = "/var/log/znapzend.log";
-        description = "Where to log to (syslog::&lt;facility&gt; or &lt;filepath&gt;).";
+        description = ''
+          Where to log to (syslog::&lt;facility&gt; or &lt;filepath&gt;).
+        '';
       };
 
       noDestroy = mkOption {
-        type = types.bool;
+        type = bool;
         default = false;
         description = "Does all changes to the filesystem except destroy.";
       };
 
       autoCreation = mkOption {
-        type = types.bool;
+        type = bool;
+        default = false;
+        description = "Automatically create the destination dataset if it does not exists.";
+      };
+
+      zetup = mkOption {
+        type = loaOf srcType;
+        description = "Znapzend configuration.";
+        default = {};
+        example = literalExample ''
+          {
+            "tank/home" = {
+              # Make snapshots of tank/home every hour, keep those for 1 day,
+              # keep every day's snapshot for 1 month, etc.
+              plan = "1d=>1h,1m=>1d,1y=>1m";
+              recursive = true;
+              # Send all those snapshots to john@example.com:rtank/john as well
+              destinations.remote = {
+                host = "john@example.com";
+                dataset = "rtank/john";
+              };
+            };
+          };
+        '';
+      };
+
+      pure = mkOption {
+        type = bool;
+        description = ''
+          Do not persist any stateful znapzend setups. If this option is
+          enabled, your previously set znapzend setups will be cleared and only
+          the ones defined with this module will be applied.
+        '';
         default = false;
-        description = "Automatically create the dataset on dest if it does not exists.";
       };
     };
   };
@@ -49,12 +382,30 @@ in
 
         path = with pkgs; [ zfs mbuffer openssh ];
 
+        preStart = optionalString cfg.pure ''
+          echo Resetting znapzend zetups
+          ${pkgs.znapzend}/bin/znapzendzetup list \
+            | grep -oP '(?<=\*\*\* backup plan: ).*(?= \*\*\*)' \
+            | xargs -I{} ${pkgs.znapzend}/bin/znapzendzetup delete "{}"
+        '' + concatStringsSep "\n" (mapAttrsToList (dataset: config: ''
+          echo Importing znapzend zetup ${config} for dataset ${dataset}
+          ${pkgs.znapzend}/bin/znapzendzetup import --write ${dataset} ${config}
+        '') files);
+
         serviceConfig = {
-          ExecStart = "${pkgs.znapzend}/bin/znapzend --logto=${cfg.logTo} --loglevel=${cfg.logLevel} ${optionalString cfg.noDestroy "--nodestroy"} ${optionalString cfg.autoCreation "--autoCreation"}";
+          ExecStart = let
+            args = concatStringsSep " " [
+              "--logto=${cfg.logTo}"
+              "--loglevel=${cfg.logLevel}"
+              (optionalString cfg.noDestroy "--nodestroy")
+              (optionalString cfg.autoCreation "--autoCreation")
+            ]; in "${pkgs.znapzend}/bin/znapzend ${args}";
           ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
           Restart = "on-failure";
         };
       };
     };
   };
+
+  meta.maintainers = with maintainers; [ infinisil ];
 }
diff --git a/nixos/modules/services/cluster/kubernetes/dashboard.nix b/nixos/modules/services/cluster/kubernetes/dashboard.nix
index 75d71fccfda4..e331889b9dd5 100644
--- a/nixos/modules/services/cluster/kubernetes/dashboard.nix
+++ b/nixos/modules/services/cluster/kubernetes/dashboard.nix
@@ -6,12 +6,12 @@ let
   cfg = config.services.kubernetes.addons.dashboard;
 
   name = "gcr.io/google_containers/kubernetes-dashboard-amd64";
-	version = "v1.6.3";
+	version = "v1.8.2";
 
   image = pkgs.dockerTools.pullImage {
     imageName = name;
     imageTag = version;
-    sha256 = "1sf54d96nkgic9hir9c6p14gw24ns1k5d5a0r1sg414kjrvic0b4";
+    sha256 = "11h0fz3wxp0f10fsyqaxjm7l2qg7xws50dv5iwlck5gb1fjmajad";
   };
 in {
   options.services.kubernetes.addons.dashboard = {
diff --git a/nixos/modules/services/cluster/kubernetes/default.nix b/nixos/modules/services/cluster/kubernetes/default.nix
index 077953e4d4f8..aeb0a0d2432d 100644
--- a/nixos/modules/services/cluster/kubernetes/default.nix
+++ b/nixos/modules/services/cluster/kubernetes/default.nix
@@ -279,7 +279,7 @@ in {
       tokenAuthFile = mkOption {
         description = ''
           Kubernetes apiserver token authentication file. See
-          <link xlink:href="http://kubernetes.io/docs/admin/authentication.html"/>
+          <link xlink:href="https://kubernetes.io/docs/admin/authentication.html"/>
         '';
         default = null;
         type = types.nullOr types.path;
@@ -288,7 +288,7 @@ in {
       basicAuthFile = mkOption {
         description = ''
           Kubernetes apiserver basic authentication file. See
-          <link xlink:href="http://kubernetes.io/docs/admin/authentication.html"/>
+          <link xlink:href="https://kubernetes.io/docs/admin/authentication.html"/>
         '';
         default = pkgs.writeText "users" ''
           kubernetes,admin,0
@@ -299,16 +299,16 @@ in {
       authorizationMode = mkOption {
         description = ''
           Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/RBAC). See
-          <link xlink:href="http://kubernetes.io/docs/admin/authorization.html"/>
+          <link xlink:href="https://kubernetes.io/docs/admin/authorization.html"/>
         '';
-        default = ["RBAC"];
-        type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "RBAC"]);
+        default = ["RBAC" "Node"];
+        type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "RBAC" "Node"]);
       };
 
       authorizationPolicy = mkOption {
         description = ''
           Kubernetes apiserver authorization policy file. See
-          <link xlink:href="http://kubernetes.io/docs/admin/authorization.html"/>
+          <link xlink:href="https://kubernetes.io/docs/admin/authorization.html"/>
         '';
         default = [];
         type = types.listOf types.attrs;
@@ -332,7 +332,7 @@ in {
       runtimeConfig = mkOption {
         description = ''
           Api runtime configuration. See
-          <link xlink:href="http://kubernetes.io/docs/admin/cluster-management.html"/>
+          <link xlink:href="https://kubernetes.io/docs/admin/cluster-management.html"/>
         '';
         default = "authentication.k8s.io/v1beta1=true";
         example = "api/all=false,api/v1=true";
@@ -342,9 +342,9 @@ in {
       admissionControl = mkOption {
         description = ''
           Kubernetes admission control plugins to use. See
-          <link xlink:href="http://kubernetes.io/docs/admin/admission-controllers/"/>
+          <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
         '';
-        default = ["NamespaceLifecycle" "LimitRanger" "ServiceAccount" "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds"];
+        default = ["NamespaceLifecycle" "LimitRanger" "ServiceAccount" "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds" "NodeRestriction"];
         example = [
           "NamespaceLifecycle" "NamespaceExists" "LimitRanger"
           "SecurityContextDeny" "ServiceAccount" "ResourceQuota"
@@ -766,7 +766,7 @@ in {
           rm /opt/cni/bin/* || true
           ${concatMapStrings (package: ''
             echo "Linking cni package: ${package}"
-            ln -fs ${package.plugins}/* /opt/cni/bin
+            ln -fs ${package}/bin/* /opt/cni/bin
           '') cfg.kubelet.cni.packages}
         '';
         serviceConfig = {
@@ -828,7 +828,7 @@ in {
       };
 
       # Always include cni plugins
-      services.kubernetes.kubelet.cni.packages = [pkgs.cni];
+      services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins];
 
       boot.kernelModules = ["br_netfilter"];
 
diff --git a/nixos/modules/services/computing/slurm/slurm.nix b/nixos/modules/services/computing/slurm/slurm.nix
index fb91a29a4000..45d34f5b76f5 100644
--- a/nixos/modules/services/computing/slurm/slurm.nix
+++ b/nixos/modules/services/computing/slurm/slurm.nix
@@ -6,14 +6,20 @@ let
 
   cfg = config.services.slurm;
   # configuration file can be generated by http://slurm.schedmd.com/configurator.html
-  configFile = pkgs.writeText "slurm.conf" 
+  configFile = pkgs.writeText "slurm.conf"
     ''
       ${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
       ${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
       ${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''}
       ${optionalString (cfg.partitionName != null) ''partitionName=${cfg.partitionName}''}
+      PlugStackConfig=${plugStackConfig}
       ${cfg.extraConfig}
     '';
+
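+  # plugstack.conf is referenced from the generated slurm.conf above (via
+  # PlugStackConfig); when enableSrunX11 is set it loads the X11 SPANK plugin
+  # from the slurm-spank-x11 package.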
+  plugStackConfig = pkgs.writeText "plugstack.conf"
+    ''
+      ${optionalString cfg.enableSrunX11 ''optional ${pkgs.slurm-spank-x11}/lib/x11.so''}
+    '';
 in
 
 {
@@ -28,7 +34,7 @@ in
         enable = mkEnableOption "slurm control daemon";
 
       };
-      
+
       client = {
         enable = mkEnableOption "slurm rlient daemon";
 
@@ -86,8 +92,19 @@ in
         '';
       };
 
+      enableSrunX11 = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          If enabled, srun will accept the option "--x11" to allow X11 forwarding
+          from within an interactive session or a batch job. This activates the
+          slurm-spank-x11 module. Note that this requires 'services.openssh.forwardX11'
+          to be enabled on the compute nodes.
+        '';
+      };
+
       extraConfig = mkOption {
-        default = ""; 
+        default = "";
         type = types.lines;
         description = ''
           Extra configuration options that will be added verbatim at
@@ -134,7 +151,8 @@ in
     environment.systemPackages = [ wrappedSlurm ];
 
     systemd.services.slurmd = mkIf (cfg.client.enable) {
-      path = with pkgs; [ wrappedSlurm coreutils ];
+      path = with pkgs; [ wrappedSlurm coreutils ]
+        ++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
 
       wantedBy = [ "multi-user.target" ];
       after = [ "systemd-tmpfiles-clean.service" ];
@@ -152,8 +170,9 @@ in
     };
 
     systemd.services.slurmctld = mkIf (cfg.server.enable) {
-      path = with pkgs; [ wrappedSlurm munge coreutils ];
-      
+      path = with pkgs; [ wrappedSlurm munge coreutils ]
+        ++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
+
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" "munged.service" ];
       requires = [ "munged.service" ];
diff --git a/nixos/modules/services/continuous-integration/buildkite-agent.nix b/nixos/modules/services/continuous-integration/buildkite-agent.nix
index 9c06e1d43bbe..03af9a7859ec 100644
--- a/nixos/modules/services/continuous-integration/buildkite-agent.nix
+++ b/nixos/modules/services/continuous-integration/buildkite-agent.nix
@@ -4,14 +4,31 @@ with lib;
 
 let
   cfg = config.services.buildkite-agent;
-  configFile = pkgs.writeText "buildkite-agent.cfg"
-    ''
-      token="${cfg.token}"
-      name="${cfg.name}"
-      meta-data="${cfg.meta-data}"
-      hooks-path="${cfg.package}/share/hooks"
-      build-path="${cfg.dataDir}"
+
+  mkHookOption = { name, description, example ? null }: {
+    inherit name;
+    value = mkOption {
+      default = null;
+      inherit description;
+      type = types.nullOr types.lines;
+    } // (if example == null then {} else { inherit example; });
+  };
+  mkHookOptions = hooks: listToAttrs (map mkHookOption hooks);
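+  # mkHookOptions turns the list further below into an attribute set of
+  # nullable "lines" options, one per hook name (hooks.checkout,
+  # hooks.command, hooks.environment, ...).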
+
+  hooksDir = let
+    mkHookEntry = name: value: ''
+      cat > $out/${name} <<EOF
+      #! ${pkgs.runtimeShell}
+      set -e
+      ${value}
+      EOF
+      chmod 755 $out/${name}
     '';
+  in pkgs.runCommand "buildkite-agent-hooks" {} ''
+    mkdir $out
+    ${concatStringsSep "\n" (mapAttrsToList mkHookEntry (filterAttrs (n: v: v != null) cfg.hooks))}
+  '';
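+  # The resulting derivation contains one executable script per configured
+  # hook, e.g. $out/environment or $out/pre-command; it is what the hooksPath
+  # option points at by default.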
+
 in
 
 {
@@ -39,15 +56,19 @@ in
         type = types.listOf types.package;
       };
 
-      token = mkOption {
-        type = types.str;
+      tokenPath = mkOption {
+        type = types.path;
         description = ''
           The token from your Buildkite "Agents" page.
+
+          A run-time path to the token file, which is supposed to be provisioned
+          outside of the Nix store.
         '';
       };
 
       name = mkOption {
         type = types.str;
+        default = "%hostname-%n";
         description = ''
           The name of the agent.
         '';
@@ -56,25 +77,110 @@ in
       meta-data = mkOption {
         type = types.str;
         default = "";
+        example = "queue=default,docker=true,ruby2=true";
+        description = ''
+          Meta data for the agent. This is a comma-separated list of
+          <code>key=value</code> pairs.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = "debug=true";
         description = ''
-          Meta data for the agent.
+          Extra lines to be added verbatim to the configuration file.
         '';
       };
 
       openssh =
-        { privateKey = mkOption {
-            type = types.str;
+        { privateKeyPath = mkOption {
+            type = types.path;
             description = ''
               Private agent key.
+
+              A run-time path to the key file, which is supposed to be provisioned
+              outside of the Nix store.
             '';
           };
-          publicKey = mkOption {
-            type = types.str;
+          publicKeyPath = mkOption {
+            type = types.path;
             description = ''
               Public agent key.
+
+              A run-time path to the key file, which is supposed to be provisioned
+              outside of the Nix store.
             '';
           };
         };
+
+      hooks = mkHookOptions [
+        { name = "checkout";
+          description = ''
+            The `checkout` hook script will replace the default checkout routine of the
+            bootstrap.sh script. You can use this hook to do your own SCM checkout
+            behaviour.
+          ''; }
+        { name = "command";
+          description = ''
+            The `command` hook script will replace the default implementation of running
+            the build command.
+          ''; }
+        { name = "environment";
+          description = ''
+            The `environment` hook will run before all other commands, and can be used
+            to set up secrets, data, etc. Anything exported in hooks will be available
+            to the build script.
+
+            Note: the contents of this file will be copied to the world-readable
+            Nix store.
+          '';
+          example = ''
+            export SECRET_VAR=`head -1 /run/keys/secret`
+          ''; }
+        { name = "post-artifact";
+          description = ''
+            The `post-artifact` hook will run just after artifacts are uploaded.
+          ''; }
+        { name = "post-checkout";
+          description = ''
+            The `post-checkout` hook will run after the bootstrap script has checked out
+            your project's source code.
+          ''; }
+        { name = "post-command";
+          description = ''
+            The `post-command` hook will run after the bootstrap script has run your
+            build commands.
+          ''; }
+        { name = "pre-artifact";
+          description = ''
+            The `pre-artifact` hook will run just before artifacts are uploaded.
+          ''; }
+        { name = "pre-checkout";
+          description = ''
+            The `pre-checkout` hook will run just before your project's source code is
+            checked out from your SCM provider.
+          ''; }
+        { name = "pre-command";
+          description = ''
+            The `pre-command` hook will run just before your build command runs.
+          ''; }
+        { name = "pre-exit";
+          description = ''
+            The `pre-exit` hook will run just before your build job finishes.
+          ''; }
+      ];
+
+      hooksPath = mkOption {
+        type = types.path;
+        default = hooksDir;
+        defaultText = "generated from services.buildkite-agent.hooks";
+        description = ''
+          Path to the directory storing the hooks.
+          Consider using <option>services.buildkite-agent.hooks.&lt;name&gt;</option>
+          instead.
+        '';
+      };
     };
   };
 
@@ -84,6 +190,7 @@ in
         home = cfg.dataDir;
         createHome = true;
         description = "Buildkite agent user";
+        extraGroups = [ "keys" ];
       };
 
     environment.systemPackages = [ cfg.package ];
@@ -92,28 +199,54 @@ in
       { description = "Buildkite Agent";
         wantedBy = [ "multi-user.target" ];
         after = [ "network.target" ];
-        path = cfg.runtimePackages;
+        path = cfg.runtimePackages ++ [ pkgs.coreutils ];
         environment = config.networking.proxy.envVars // {
           HOME = cfg.dataDir;
           NIX_REMOTE = "daemon";
         };
-        preStart = ''
-          ${pkgs.coreutils}/bin/mkdir -m 0700 -p ${cfg.dataDir}/.ssh
-
-          echo "${cfg.openssh.privateKey}" > ${cfg.dataDir}/.ssh/id_rsa
-          ${pkgs.coreutils}/bin/chmod 600 ${cfg.dataDir}/.ssh/id_rsa
 
-          echo "${cfg.openssh.publicKey}" > ${cfg.dataDir}/.ssh/id_rsa.pub
-          ${pkgs.coreutils}/bin/chmod 600 ${cfg.dataDir}/.ssh/id_rsa.pub
-        '';
+        ## NB: maximum care is taken so that secrets (ssh keys and the CI token)
+        ##     don't end up in the Nix store.
+        preStart = let
+          sshDir = "${cfg.dataDir}/.ssh";
+        in
+          ''
+            mkdir -m 0700 -p "${sshDir}"
+            cp -f "${toString cfg.openssh.privateKeyPath}" "${sshDir}/id_rsa"
+            cp -f "${toString cfg.openssh.publicKeyPath}"  "${sshDir}/id_rsa.pub"
+            chmod 600 "${sshDir}"/id_rsa*
+
+            cat > "${cfg.dataDir}/buildkite-agent.cfg" <<EOF
+            token="$(cat ${toString cfg.tokenPath})"
+            name="${cfg.name}"
+            meta-data="${cfg.meta-data}"
+            build-path="${cfg.dataDir}/builds"
+            hooks-path="${cfg.hooksPath}"
+            ${cfg.extraConfig}
+            EOF
+          '';
 
         serviceConfig =
-          { ExecStart = "${pkgs.buildkite-agent}/bin/buildkite-agent start --config ${configFile}";
+          { ExecStart = "${pkgs.buildkite-agent}/bin/buildkite-agent start --config /var/lib/buildkite-agent/buildkite-agent.cfg";
             User = "buildkite-agent";
             RestartSec = 5;
             Restart = "on-failure";
             TimeoutSec = 10;
           };
       };
+
+    assertions = [
+      { assertion = cfg.hooksPath == hooksDir || all isNull (attrValues cfg.hooks);
+        message = ''
+          Options `services.buildkite-agent.hooksPath' and
+          `services.buildkite-agent.hooks.<name>' are mutually exclusive.
+        '';
+      }
+    ];
   };
+  imports = [
+    (mkRenamedOptionModule [ "services" "buildkite-agent" "token" ]                [ "services" "buildkite-agent" "tokenPath" ])
+    (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "privateKey" ] [ "services" "buildkite-agent" "openssh" "privateKeyPath" ])
+    (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "publicKey" ]  [ "services" "buildkite-agent" "openssh" "publicKeyPath" ])
+  ];
 }
diff --git a/nixos/modules/services/continuous-integration/hydra/default.nix b/nixos/modules/services/continuous-integration/hydra/default.nix
index 43fec5ff5bb2..2fa7c59a965d 100644
--- a/nixos/modules/services/continuous-integration/hydra/default.nix
+++ b/nixos/modules/services/continuous-integration/hydra/default.nix
@@ -28,6 +28,7 @@ let
 
   serverEnv = env //
     { HYDRA_TRACKER = cfg.tracker;
+      XDG_CACHE_HOME = "${baseDir}/www/.cache";
       COLUMNS = "80";
       PGPASSFILE = "${baseDir}/pgpass-www"; # grrr
     } // (optionalAttrs cfg.debugServer { DBIC_TRACE = "1"; });
@@ -225,14 +226,14 @@ in
 
     services.hydra.extraConfig =
       ''
-        using_frontend_proxy 1
-        base_uri ${cfg.hydraURL}
-        notification_sender ${cfg.notificationSender}
-        max_servers 25
+        using_frontend_proxy = 1
+        base_uri = ${cfg.hydraURL}
+        notification_sender = ${cfg.notificationSender}
+        max_servers = 25
         ${optionalString (cfg.logo != null) ''
-          hydra_logo ${cfg.logo}
+          hydra_logo = ${cfg.logo}
         ''}
-        gc_roots_dir ${cfg.gcRootsDir}
+        gc_roots_dir = ${cfg.gcRootsDir}
         use-substitutes = ${if cfg.useSubstitutes then "1" else "0"}
       '';
 
diff --git a/nixos/modules/services/continuous-integration/jenkins/default.nix b/nixos/modules/services/continuous-integration/jenkins/default.nix
index 0dd59e4fb444..c2f4e9c0c5a7 100644
--- a/nixos/modules/services/continuous-integration/jenkins/default.nix
+++ b/nixos/modules/services/continuous-integration/jenkins/default.nix
@@ -145,6 +145,11 @@ in {
   };
 
   config = mkIf cfg.enable {
+    # server references the dejavu fonts
+    environment.systemPackages = [
+      pkgs.dejavu_fonts
+    ];
+
     users.extraGroups = optional (cfg.group == "jenkins") {
       name = "jenkins";
       gid = config.ids.gids.jenkins;
@@ -200,15 +205,17 @@ in {
           ${replacePlugins}
         '';
 
+      # For reference: https://wiki.jenkins.io/display/JENKINS/JenkinsLinuxStartupScript
       script = ''
         ${pkgs.jdk}/bin/java ${concatStringsSep " " cfg.extraJavaOptions} -jar ${cfg.package}/webapps/jenkins.war --httpListenAddress=${cfg.listenAddress} \
                                                   --httpPort=${toString cfg.port} \
                                                   --prefix=${cfg.prefix} \
+                                                  -Djava.awt.headless=true \
                                                   ${concatStringsSep " " cfg.extraOptions}
       '';
 
       postStart = ''
-        until [[ $(${pkgs.curl.bin}/bin/curl -s --head -w '\n%{http_code}' http://${cfg.listenAddress}:${toString cfg.port}${cfg.prefix} | tail -n1) =~ ^(200|403)$ ]]; do
+        until [[ $(${pkgs.curl.bin}/bin/curl -L -s --head -w '\n%{http_code}' http://${cfg.listenAddress}:${toString cfg.port}${cfg.prefix} | tail -n1) =~ ^(200|403)$ ]]; do
           sleep 1
         done
       '';
diff --git a/nixos/modules/services/databases/4store-endpoint.nix b/nixos/modules/services/databases/4store-endpoint.nix
index 906cb320df98..d528355671f6 100644
--- a/nixos/modules/services/databases/4store-endpoint.nix
+++ b/nixos/modules/services/databases/4store-endpoint.nix
@@ -2,7 +2,7 @@
 let
   cfg = config.services.fourStoreEndpoint;
   endpointUser = "fourstorehttp";
-  run = "${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${endpointUser} -c";
+  run = "${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${endpointUser} -c";
 in
 with lib;
 {
diff --git a/nixos/modules/services/databases/4store.nix b/nixos/modules/services/databases/4store.nix
index 62856822f906..abb62e1f2637 100644
--- a/nixos/modules/services/databases/4store.nix
+++ b/nixos/modules/services/databases/4store.nix
@@ -3,7 +3,7 @@ let
   cfg = config.services.fourStore;
   stateDir = "/var/lib/4store";
   fourStoreUser = "fourstore";
-  run = "${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${fourStoreUser}";
+  run = "${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${fourStoreUser}";
 in
 with lib;
 {
diff --git a/nixos/modules/services/databases/memcached.nix b/nixos/modules/services/databases/memcached.nix
index c6875af506d3..46bc6fc5c132 100644
--- a/nixos/modules/services/databases/memcached.nix
+++ b/nixos/modules/services/databases/memcached.nix
@@ -40,11 +40,7 @@ in
         description = "The port to bind to";
       };
 
-      socket = mkOption {
-        default = "";
-        description = "Unix socket path to listen on. Setting this will disable network support";
-        example = "/var/run/memcached";
-      };
+      enableUnixSocket = mkEnableOption "unix socket at /run/memcached/memcached.sock";
 
       maxMemory = mkOption {
         default = 64;
@@ -68,31 +64,40 @@ in
 
   config = mkIf config.services.memcached.enable {
 
-    users.extraUsers.memcached =
-      { name = cfg.user;
-        uid = config.ids.uids.memcached;
-        description = "Memcached server user";
-      };
+    users.extraUsers = optional (cfg.user == "memcached") {
+      name = "memcached";
+      description = "Memcached server user";
+    };
 
     environment.systemPackages = [ memcached ];
 
-    systemd.services.memcached =
-      { description = "Memcached server";
-
-        wantedBy = [ "multi-user.target" ];
-        after = [ "network.target" ];
-
-        serviceConfig = {
-          ExecStart =
-            let
-              networking = if cfg.socket != ""
-                then "-s ${cfg.socket}"
-                else "-l ${cfg.listen} -p ${toString cfg.port}";
-            in "${memcached}/bin/memcached ${networking} -m ${toString cfg.maxMemory} -c ${toString cfg.maxConnections} ${concatStringsSep " " cfg.extraOptions}";
-
-          User = cfg.user;
-        };
+    systemd.services.memcached = {
+      description = "Memcached server";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        PermissionsStartOnly = true;
+        ExecStartPre = optionals cfg.enableUnixSocket [
+          "${pkgs.coreutils}/bin/install -d -o ${cfg.user} /run/memcached/"
+          "${pkgs.coreutils}/bin/chown -R ${cfg.user} /run/memcached/"
+        ];
+        ExecStart =
+        let
+          networking = if cfg.enableUnixSocket
+          then "-s /run/memcached/memcached.sock"
+          else "-l ${cfg.listen} -p ${toString cfg.port}";
+        in "${memcached}/bin/memcached ${networking} -m ${toString cfg.maxMemory} -c ${toString cfg.maxConnections} ${concatStringsSep " " cfg.extraOptions}";
+
+        User = cfg.user;
       };
+    };
   };
+  imports = [
+    (mkRemovedOptionModule ["services" "memcached" "socket"] ''
+      This option was replaced by a fixed Unix socket path at /run/memcached/memcached.sock, which can be enabled using services.memcached.enableUnixSocket.
+    '')
+  ];
 
 }
diff --git a/nixos/modules/services/databases/mysql.nix b/nixos/modules/services/databases/mysql.nix
index a3bf4f9ba925..21a131b90a81 100644
--- a/nixos/modules/services/databases/mysql.nix
+++ b/nixos/modules/services/databases/mysql.nix
@@ -7,14 +7,12 @@ let
   cfg = config.services.mysql;
 
   mysql = cfg.package;
-  
-  isMariaDB = 
+
+  isMariaDB =
     let
       pName = _p: (builtins.parseDrvName (_p.name)).name;
     in pName mysql == pName pkgs.mariadb;
 
-  atLeast55 = versionAtLeast mysql.mysqlVersion "5.5";
-
   pidFile = "${cfg.pidDir}/mysqld.pid";
 
   mysqldOptions =
@@ -28,13 +26,6 @@ let
     ${optionalString (cfg.bind != null) "bind-address = ${cfg.bind}" }
     ${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "log-bin=mysql-bin"}
     ${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "server-id = ${toString cfg.replication.serverId}"}
-    ${optionalString (cfg.replication.role == "slave" && !atLeast55)
-    ''
-      master-host = ${cfg.replication.masterHost}
-      master-user = ${cfg.replication.masterUser}
-      master-password = ${cfg.replication.masterPassword}
-      master-port = ${toString cfg.replication.masterPort}
-    ''}
     ${optionalString (cfg.ensureUsers != [])
     ''
       plugin-load-add = auth_socket.so
@@ -142,7 +133,7 @@ in
         '';
         example = [
           "nextcloud"
-          "piwik"
+          "matomo"
         ];
       };
 
@@ -298,10 +289,10 @@ in
                     # Create initial databases
                     if ! test -e "${cfg.dataDir}/${database.name}"; then
                         echo "Creating initial database: ${database.name}"
-                        ( echo "create database ${database.name};"
+                        ( echo 'create database `${database.name}`;'
 
                           ${optionalString (database ? "schema") ''
-                          echo "use ${database.name};"
+                          echo 'use `${database.name}`;'
 
                           if [ -f "${database.schema}" ]
                           then
@@ -315,7 +306,7 @@ in
                     fi
                   '') cfg.initialDatabases}
 
-                ${optionalString (cfg.replication.role == "master" && atLeast55)
+                ${optionalString (cfg.replication.role == "master")
                   ''
                     # Set up the replication master
 
@@ -326,7 +317,7 @@ in
                     ) | ${mysql}/bin/mysql -u root -N
                   ''}
 
-                ${optionalString (cfg.replication.role == "slave" && atLeast55)
+                ${optionalString (cfg.replication.role == "slave")
                   ''
                     # Set up the replication slave
 
diff --git a/nixos/modules/services/databases/openldap.nix b/nixos/modules/services/databases/openldap.nix
index e884098cb08d..a67c61eb9949 100644
--- a/nixos/modules/services/databases/openldap.nix
+++ b/nixos/modules/services/databases/openldap.nix
@@ -7,8 +7,10 @@ let
   cfg = config.services.openldap;
   openldap = pkgs.openldap;
 
+  dataFile = pkgs.writeText "ldap-contents.ldif" cfg.declarativeContents;
   configFile = pkgs.writeText "slapd.conf" cfg.extraConfig;
-
+  configOpts = if cfg.configDir == null then "-f ${configFile}"
+               else "-F ${cfg.configDir}";
 in
 
 {
@@ -81,6 +83,34 @@ in
             '''
           '';
       };
+
+      declarativeContents = mkOption {
+        type = with types; nullOr lines;
+        default = null;
+        description = ''
+          Declarative contents for the LDAP database, in LDIF format.
+
+          Note a few facts when using it. First, the database
+          <emphasis>must</emphasis> be stored in the directory defined by
+          <code>dataDir</code>. Second, the entire <code>dataDir</code> is
+          erased when starting the LDAP server. Third, modifications to the
+          database are not prevented; they are simply dropped on the next
+          restart of the server. Finally, the database and indexes are rebuilt
+          on each server startup, so this will slow down server startup,
+          especially with large databases.
+        '';
+        example = ''
+          dn: dc=example,dc=org
+          objectClass: domain
+          dc: example
+
+          dn: ou=users,dc=example,dc=org
+          objectClass: organizationalUnit
+          ou: users
+
+          # ...
+        '';
+      };
     };
 
   };
@@ -88,7 +118,7 @@ in
 
   ###### implementation
 
-  config = mkIf config.services.openldap.enable {
+  config = mkIf cfg.enable {
 
     environment.systemPackages = [ openldap ];
 
@@ -98,11 +128,21 @@ in
       after = [ "network.target" ];
       preStart = ''
         mkdir -p /var/run/slapd
-        chown -R ${cfg.user}:${cfg.group} /var/run/slapd
-        mkdir -p ${cfg.dataDir}
-        chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
+        chown -R "${cfg.user}:${cfg.group}" /var/run/slapd
+        ${optionalString (cfg.declarativeContents != null) ''
+          rm -Rf "${cfg.dataDir}"
+        ''}
+        mkdir -p "${cfg.dataDir}"
+        ${optionalString (cfg.declarativeContents != null) ''
+          ${openldap.out}/bin/slapadd ${configOpts} -l ${dataFile}
+        ''}
+        chown -R "${cfg.user}:${cfg.group}" "${cfg.dataDir}"
       '';
-      serviceConfig.ExecStart = "${openldap.out}/libexec/slapd -u ${cfg.user} -g ${cfg.group} -d 0 -h \"${concatStringsSep " " cfg.urlList}\" ${if cfg.configDir == null then "-f "+configFile else "-F "+cfg.configDir}";
+      serviceConfig.ExecStart =
+        "${openldap.out}/libexec/slapd -d 0 " +
+          "-u '${cfg.user}' -g '${cfg.group}' " +
+          "-h '${concatStringsSep " " cfg.urlList}' " +
+          "${configOpts}";
     };
 
     users.extraUsers.openldap =
diff --git a/nixos/modules/services/databases/pgmanage.nix b/nixos/modules/services/databases/pgmanage.nix
index 86733a3e5a07..d1b48c06440e 100644
--- a/nixos/modules/services/databases/pgmanage.nix
+++ b/nixos/modules/services/databases/pgmanage.nix
@@ -22,7 +22,7 @@ let
 
       web_root = ${cfg.package}/etc/pgmanage/web_root
 
-      data_root = ${cfg.dataRoot}
+      sql_root = ${cfg.sqlRoot}
 
       ${optionalString (!isNull cfg.tls) ''
       tls_cert = ${cfg.tls.cert}
@@ -130,7 +130,7 @@ let
       '';
     };
 
-    dataRoot = mkOption {
+    sqlRoot = mkOption {
       type = types.str;
       default = "/var/lib/pgmanage";
       description = ''
@@ -210,7 +210,7 @@ in {
         users."${pgmanage}" = {
           name  = pgmanage;
           group = pgmanage;
-          home  = cfg.dataRoot;
+          home  = cfg.sqlRoot;
           createHome = true;
         };
         groups."${pgmanage}" = {
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index 9b5e3735239f..f022e0863dfd 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -36,9 +36,6 @@ let
       ${cfg.extraConfig}
     '';
 
-  pre84 = versionOlder (builtins.parseDrvName postgresql.name).version "8.4";
-
-
 in
 
 {
@@ -122,7 +119,7 @@ in
       extraPlugins = mkOption {
         type = types.listOf types.path;
         default = [];
-        example = literalExample "[ (pkgs.postgis.override { postgresql = pkgs.postgresql94; }).v_2_1_4 ]";
+        example = literalExample "[ (pkgs.postgis.override { postgresql = pkgs.postgresql94; }) ]";
         description = ''
           When this list contains elements a new store path is created.
           PostgreSQL and the elements are symlinked into it. Then pg_config,
@@ -182,7 +179,7 @@ in
     services.postgresql.authentication = mkAfter
       ''
         # Generated file; do not edit!
-        local all all              ident ${optionalString pre84 "sameuser"}
+        local all all              ident
         host  all all 127.0.0.1/32 md5
         host  all all ::1/128      md5
       '';
diff --git a/nixos/modules/services/databases/redis.nix b/nixos/modules/services/databases/redis.nix
index a039ad138f6f..e4e38a4364a0 100644
--- a/nixos/modules/services/databases/redis.nix
+++ b/nixos/modules/services/databases/redis.nix
@@ -219,7 +219,6 @@ in
 
     users.extraUsers.redis =
       { name = cfg.user;
-        uid = config.ids.uids.redis;
         description = "Redis database user";
       };
 
diff --git a/nixos/modules/services/desktops/gnome3/at-spi2-core.nix b/nixos/modules/services/desktops/gnome3/at-spi2-core.nix
index 55ed2d9ee21b..cca98c43dc7a 100644
--- a/nixos/modules/services/desktops/gnome3/at-spi2-core.nix
+++ b/nixos/modules/services/desktops/gnome3/at-spi2-core.nix
@@ -28,14 +28,15 @@ with lib;
 
   ###### implementation
 
-  config = mkIf config.services.gnome3.at-spi2-core.enable {
-
-    environment.systemPackages = [ pkgs.at_spi2_core ];
-
-    services.dbus.packages = [ pkgs.at_spi2_core ];
-
-    systemd.packages = [ pkgs.at_spi2_core ];
-
-  };
-
+  config = mkMerge [
+    (mkIf config.services.gnome3.at-spi2-core.enable {
+      environment.systemPackages = [ pkgs.at-spi2-core ];
+      services.dbus.packages = [ pkgs.at-spi2-core ];
+      systemd.packages = [ pkgs.at-spi2-core ];
+    })
+
+    (mkIf (!config.services.gnome3.at-spi2-core.enable) {
+      environment.variables.NO_AT_BRIDGE = "1";
+    })
+  ];
 }
diff --git a/nixos/modules/services/desktops/gnome3/chrome-gnome-shell.nix b/nixos/modules/services/desktops/gnome3/chrome-gnome-shell.nix
new file mode 100644
index 000000000000..2740a22c7ca0
--- /dev/null
+++ b/nixos/modules/services/desktops/gnome3/chrome-gnome-shell.nix
@@ -0,0 +1,27 @@
+# Chrome GNOME Shell native host connector.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  ###### interface
+  options = {
+    services.gnome3.chrome-gnome-shell.enable = mkEnableOption ''
+      Chrome GNOME Shell native host connector, a DBus service
+      allowing to install GNOME Shell extensions from a web browser.
+    '';
+  };
+
+
+  ###### implementation
+  config = mkIf config.services.gnome3.chrome-gnome-shell.enable {
+    environment.etc = {
+      "chromium/native-messaging-hosts/org.gnome.chrome_gnome_shell.json".source = "${pkgs.chrome-gnome-shell}/etc/chromium/native-messaging-hosts/org.gnome.chrome_gnome_shell.json";
+      "opt/chrome/native-messaging-hosts/org.gnome.chrome_gnome_shell.json".source = "${pkgs.chrome-gnome-shell}/etc/opt/chrome/native-messaging-hosts/org.gnome.chrome_gnome_shell.json";
+    };
+
+    environment.systemPackages = [ pkgs.chrome-gnome-shell ];
+
+    services.dbus.packages = [ pkgs.chrome-gnome-shell ];
+  };
+}
diff --git a/nixos/modules/services/desktops/gnome3/evolution-data-server.nix b/nixos/modules/services/desktops/gnome3/evolution-data-server.nix
index 86a47488d865..7e312a1b81eb 100644
--- a/nixos/modules/services/desktops/gnome3/evolution-data-server.nix
+++ b/nixos/modules/services/desktops/gnome3/evolution-data-server.nix
@@ -30,11 +30,11 @@ with lib;
 
   config = mkIf config.services.gnome3.evolution-data-server.enable {
 
-    environment.systemPackages = [ pkgs.gnome3.evolution_data_server ];
+    environment.systemPackages = [ pkgs.gnome3.evolution-data-server ];
 
-    services.dbus.packages = [ pkgs.gnome3.evolution_data_server ];
+    services.dbus.packages = [ pkgs.gnome3.evolution-data-server ];
 
-    systemd.packages = [ pkgs.gnome3.evolution_data_server ];
+    systemd.packages = [ pkgs.gnome3.evolution-data-server ];
 
   };
 
diff --git a/nixos/modules/services/desktops/gnome3/gnome-keyring.nix b/nixos/modules/services/desktops/gnome3/gnome-keyring.nix
index 2a68af5a7dd8..aa1165ab3bba 100644
--- a/nixos/modules/services/desktops/gnome3/gnome-keyring.nix
+++ b/nixos/modules/services/desktops/gnome3/gnome-keyring.nix
@@ -31,9 +31,9 @@ with lib;
 
   config = mkIf config.services.gnome3.gnome-keyring.enable {
 
-    environment.systemPackages = [ pkgs.gnome3.gnome_keyring ];
+    environment.systemPackages = [ pkgs.gnome3.gnome-keyring ];
 
-    services.dbus.packages = [ pkgs.gnome3.gnome_keyring pkgs.gnome3.gcr ];
+    services.dbus.packages = [ pkgs.gnome3.gnome-keyring pkgs.gnome3.gcr ];
 
   };
 
diff --git a/nixos/modules/services/desktops/gnome3/gnome-online-accounts.nix b/nixos/modules/services/desktops/gnome3/gnome-online-accounts.nix
index 0da4aca73ecb..4286251357f7 100644
--- a/nixos/modules/services/desktops/gnome3/gnome-online-accounts.nix
+++ b/nixos/modules/services/desktops/gnome3/gnome-online-accounts.nix
@@ -30,9 +30,9 @@ with lib;
 
   config = mkIf config.services.gnome3.gnome-online-accounts.enable {
 
-    environment.systemPackages = [ pkgs.gnome3.gnome_online_accounts ];
+    environment.systemPackages = [ pkgs.gnome3.gnome-online-accounts ];
 
-    services.dbus.packages = [ pkgs.gnome3.gnome_online_accounts ];
+    services.dbus.packages = [ pkgs.gnome3.gnome-online-accounts ];
 
   };
 
diff --git a/nixos/modules/services/desktops/gnome3/gnome-terminal-server.nix b/nixos/modules/services/desktops/gnome3/gnome-terminal-server.nix
index 3ac767bfa00d..fd14efee5f2e 100644
--- a/nixos/modules/services/desktops/gnome3/gnome-terminal-server.nix
+++ b/nixos/modules/services/desktops/gnome3/gnome-terminal-server.nix
@@ -30,11 +30,11 @@ with lib;
 
   config = mkIf config.services.gnome3.gnome-terminal-server.enable {
 
-    environment.systemPackages = [ pkgs.gnome3.gnome_terminal ];
+    environment.systemPackages = [ pkgs.gnome3.gnome-terminal ];
 
-    services.dbus.packages = [ pkgs.gnome3.gnome_terminal ];
+    services.dbus.packages = [ pkgs.gnome3.gnome-terminal ];
 
-    systemd.packages = [ pkgs.gnome3.gnome_terminal ];
+    systemd.packages = [ pkgs.gnome3.gnome-terminal ];
 
   };
 
diff --git a/nixos/modules/services/desktops/gnome3/tracker-miners.nix b/nixos/modules/services/desktops/gnome3/tracker-miners.nix
new file mode 100644
index 000000000000..20154fc2fed3
--- /dev/null
+++ b/nixos/modules/services/desktops/gnome3/tracker-miners.nix
@@ -0,0 +1,41 @@
+# Tracker Miners daemons.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.gnome3.tracker-miners = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable Tracker miners, the indexing services for the
+          Tracker search engine and metadata storage system.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.gnome3.tracker-miners.enable {
+
+    environment.systemPackages = [ pkgs.gnome3.tracker-miners ];
+
+    services.dbus.packages = [ pkgs.gnome3.tracker-miners ];
+
+    systemd.packages = [ pkgs.gnome3.tracker-miners ];
+
+  };
+
+}
diff --git a/nixos/modules/services/desktops/pipewire.nix b/nixos/modules/services/desktops/pipewire.nix
new file mode 100644
index 000000000000..263a06156f84
--- /dev/null
+++ b/nixos/modules/services/desktops/pipewire.nix
@@ -0,0 +1,23 @@
+# pipewire service.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  ###### interface
+  options = {
+    services.pipewire = {
+      enable = mkEnableOption "pipewire service";
+    };
+  };
+
+
+  ###### implementation
+  config = mkIf config.services.pipewire.enable {
+    environment.systemPackages = [ pkgs.pipewire ];
+
+    systemd.packages = [ pkgs.pipewire ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ jtojnar ];
+}
diff --git a/nixos/modules/services/desktops/telepathy.nix b/nixos/modules/services/desktops/telepathy.nix
index 2554f3a1666f..f5401c180984 100644
--- a/nixos/modules/services/desktops/telepathy.nix
+++ b/nixos/modules/services/desktops/telepathy.nix
@@ -30,9 +30,9 @@ with lib;
 
   config = mkIf config.services.telepathy.enable {
 
-    environment.systemPackages = [ pkgs.telepathy_mission_control ];
+    environment.systemPackages = [ pkgs.telepathy-mission-control ];
 
-    services.dbus.packages = [ pkgs.telepathy_mission_control ];
+    services.dbus.packages = [ pkgs.telepathy-mission-control ];
 
   };
 
diff --git a/nixos/modules/services/editors/emacs.nix b/nixos/modules/services/editors/emacs.nix
index 2c5a0c4849ef..ba7ec967919e 100644
--- a/nixos/modules/services/editors/emacs.nix
+++ b/nixos/modules/services/editors/emacs.nix
@@ -7,7 +7,7 @@ let
   cfg = config.services.emacs;
 
   editorScript = pkgs.writeScriptBin "emacseditor" ''
-    #!${pkgs.stdenv.shell}
+    #!${pkgs.runtimeShell}
     if [ -z "$1" ]; then
       exec ${cfg.package}/bin/emacsclient --create-frame --alternate-editor ${cfg.package}/bin/emacs
     else
@@ -15,6 +15,25 @@ let
     fi
   '';
 
+  desktopApplicationFile = pkgs.writeTextFile {
+    name = "emacsclient.desktop";
+    destination = "/share/applications/emacsclient.desktop";
+    text = ''
+      [Desktop Entry]
+      Name=Emacsclient
+      GenericName=Text Editor
+      Comment=Edit text
+      MimeType=text/english;text/plain;text/x-makefile;text/x-c++hdr;text/x-c++src;text/x-chdr;text/x-csrc;text/x-java;text/x-moc;text/x-pascal;text/x-tcl;text/x-tex;application/x-shellscript;text/x-c;text/x-c++;
+      Exec=emacseditor %F
+      Icon=emacs
+      Type=Application
+      Terminal=false
+      Categories=Development;TextEditor;
+      StartupWMClass=Emacs
+      Keywords=Text;Editor;
+    '';
+  };
+
 in {
 
   options.services.emacs = {
@@ -74,7 +93,7 @@ in {
       };
     } // optionalAttrs cfg.enable { wantedBy = [ "default.target" ]; };
 
-    environment.systemPackages = [ cfg.package editorScript ];
+    environment.systemPackages = [ cfg.package editorScript desktopApplicationFile ];
 
     environment.variables = {
       # This is required so that GTK applications launched from Emacs
diff --git a/nixos/modules/services/games/factorio.nix b/nixos/modules/services/games/factorio.nix
index 1dc8ce93a0e5..3f6bf9de8931 100644
--- a/nixos/modules/services/games/factorio.nix
+++ b/nixos/modules/services/games/factorio.nix
@@ -6,7 +6,7 @@ let
   cfg = config.services.factorio;
   factorio = pkgs.factorio-headless;
   name = "Factorio";
-  stateDir = "/var/lib/factorio";
+  stateDir = cfg.stateDir;
   mkSavePath = name: "${stateDir}/saves/${name}.zip";
   configFile = pkgs.writeText "factorio.conf" ''
     use-system-read-write-data-directories=true
@@ -25,7 +25,7 @@ let
     password = cfg.password;
     token = cfg.token;
     game_password = cfg.game-password;
-    require_user_verification = true;
+    require_user_verification = cfg.requireUserVerification;
     max_upload_in_kilobytes_per_second = 0;
     minimum_latency_in_ticks = 0;
     ignore_player_limit_for_returning_players = false;
@@ -80,6 +80,15 @@ in
           customizations.
         '';
       };
+      stateDir = mkOption {
+        type = types.path;
+        default = "/var/lib/factorio";
+        description = ''
+          The server's data directory.
+
+          The configuration and map will be stored here.
+        '';
+      };
       mods = mkOption {
         type = types.listOf types.package;
         default = [];
@@ -148,6 +157,13 @@ in
           Game password.
         '';
       };
+      requireUserVerification = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          When set to true, the server will only allow clients that have a valid factorio.com account.
+        '';
+      };
       autosave-interval = mkOption {
         type = types.nullOr types.int;
         default = null;
diff --git a/nixos/modules/services/games/ghost-one.nix b/nixos/modules/services/games/ghost-one.nix
deleted file mode 100644
index 71ff6bb2f3f0..000000000000
--- a/nixos/modules/services/games/ghost-one.nix
+++ /dev/null
@@ -1,105 +0,0 @@
-{ config, lib, pkgs, ... }:
-with lib;
-let
-
-  cfg = config.services.ghostOne;
-  ghostUser = "ghostone";
-  stateDir = "/var/lib/ghost-one";
-
-in
-{
-
-  ###### interface
-
-  options = {
-    services.ghostOne = {
-
-      enable = mkOption {
-        default = false;
-        description = "Enable Ghost-One Warcraft3 game hosting server.";
-      };
-
-      language = mkOption {
-        default = "English";
-        type = types.enum [ "English" "Spanish" "Russian" "Serbian" "Turkish" ];
-        description = "The language of bot messages: English, Spanish, Russian, Serbian or Turkish.";
-      };
-
-      war3path = mkOption {
-        default = "";
-        description = ''
-          The path to your local Warcraft III directory, which must contain war3.exe, storm.dll, and game.dll.
-        '';
-      };
-
-      mappath = mkOption {
-        default = "";
-        description = ''
-          The path to the directory where you keep your map files. GHost One doesn't require
-          map files but if it has access to them it can send them to players and automatically
-          calculate most map config values. GHost One will search [bot_mappath + map_localpath]
-          for the map file (map_localpath is set in each map's config file).
-        '';
-      };
-
-      config = mkOption {
-        default = "";
-        description = "Extra configuration options.";
-      };
-
-    };
-  };
-
-  ###### implementation
-
-  config = mkIf cfg.enable {
-
-    users.extraUsers = singleton
-      { name = ghostUser;
-        uid = config.ids.uids.ghostone;
-        description = "Ghost One game server user";
-        home = stateDir;
-      };
-
-    users.extraGroups = singleton
-      { name = ghostUser;
-        gid = config.ids.gids.ghostone;
-      };
-
-    services.ghostOne.config = ''
-#      bot_log = /dev/stderr
-      bot_language = ${pkgs.ghostOne}/share/ghost-one/languages/${cfg.language}.cfg
-      bot_war3path = ${cfg.war3path}
-
-      bot_mapcfgpath = mapcfgs
-      bot_savegamepath = savegames
-      bot_mappath = ${cfg.mappath}
-      bot_replaypath = replays
-    '';
-
-    systemd.services."ghost-one" = {
-      wantedBy = [ "multi-user.target" ];
-      script = ''
-        mkdir -p ${stateDir}
-        cd ${stateDir}
-        chown ${ghostUser}:${ghostUser} .
-
-        mkdir -p mapcfgs
-        chown ${ghostUser}:${ghostUser} mapcfgs
-
-        mkdir -p replays
-        chown ${ghostUser}:${ghostUser} replays
-
-        mkdir -p savegames
-        chown ${ghostUser}:${ghostUser} savegames
-
-        ln -sf ${pkgs.writeText "ghost.cfg" cfg.config} ghost.cfg
-        ln -sf ${pkgs.ghostOne}/share/ghost-one/ip-to-country.csv
-        ${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${ghostUser} \
-          -c "LANG=C ${pkgs.ghostOne}/bin/ghost++"
-      '';
-    };
-
-  };
-
-}
diff --git a/nixos/modules/services/hardware/acpid.nix b/nixos/modules/services/hardware/acpid.nix
index bb17c8859d84..0f05876aee32 100644
--- a/nixos/modules/services/hardware/acpid.nix
+++ b/nixos/modules/services/hardware/acpid.nix
@@ -31,7 +31,7 @@ let
           ''
             fn=$out/${name}
             echo "event=${handler.event}" > $fn
-            echo "action=${pkgs.writeScript "${name}.sh" (concatStringsSep "\n" [ "#! ${pkgs.bash}/bin/sh" handler.action ])}" >> $fn
+            echo "action=${pkgs.writeShellScriptBin "${name}.sh" handler.action }/bin/${name}.sh '%e'" >> $fn
           '';
         in concatStringsSep "\n" (mapAttrsToList f (canonicalHandlers // config.services.acpid.handlers))
       }
@@ -53,6 +53,12 @@ in
         description = "Whether to enable the ACPI daemon.";
       };
 
+      logEvents = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Log all event activity.";
+      };
+
       handlers = mkOption {
         type = types.attrsOf (types.submodule {
           options = {
@@ -69,11 +75,33 @@ in
           };
         });
 
-        description = "Event handlers.";
-        default = {};
-        example = { mute = { event = "button/mute.*"; action = "amixer set Master toggle"; }; };
-
+        description = ''
+          Event handlers.
 
+          <note><para>
+            A handler can be as simple as a single command.
+          </para></note>
+        '';
+        default = {};
+        example = {
+          ac-power = {
+            event = "ac_adapter/*";
+            action = ''
+              vals=($1)  # space separated string to array of multiple values
+              case ''${vals[3]} in
+                  00000000)
+                      echo unplugged >> /tmp/acpi.log
+                      ;;
+                  00000001)
+                      echo plugged in >> /tmp/acpi.log
+                      ;;
+                  *)
+                      echo unknown >> /tmp/acpi.log
+                      ;;
+              esac
+            '';
+          };
+        };
       };
 
       powerEventCommands = mkOption {
@@ -120,7 +148,7 @@ in
         ConditionPathExists = [ "/proc/acpi" ];
       };
 
-      script = "acpid --confdir ${acpiConfDir}";
+      script = "acpid ${optionalString config.services.acpid.logEvents "--logevents"} --confdir ${acpiConfDir}";
     };
 
   };
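
A minimal sketch of the reworked acpid module above, combining the new logEvents flag with the single-command handler from the module's previous example:

    {
      services.acpid = {
        enable = true;
        logEvents = true;   # passes --logevents to acpid, as wired up above
        handlers.mute = {
          event = "button/mute.*";
          action = "amixer set Master toggle";   # a handler can be as simple as one command
        };
      };
    }
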
diff --git a/nixos/modules/services/hardware/amd-hybrid-graphics.nix b/nixos/modules/services/hardware/amd-hybrid-graphics.nix
deleted file mode 100644
index b0f9ff56d1b2..000000000000
--- a/nixos/modules/services/hardware/amd-hybrid-graphics.nix
+++ /dev/null
@@ -1,46 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-{
-
-  ###### interface
-
-  options = {
-
-    hardware.amdHybridGraphics.disable = lib.mkOption {
-      default = false;
-      type = lib.types.bool;
-      description = ''
-        Completely disable the AMD graphics card and use the
-        integrated graphics processor instead.
-      '';
-    };
-
-  };
-
-
-  ###### implementation
-
-  config = lib.mkIf config.hardware.amdHybridGraphics.disable {
-    systemd.services."amd-hybrid-graphics" = {
-      path = [ pkgs.bash ];
-      description = "Disable AMD Card";
-      after = [ "sys-kernel-debug.mount" ];
-      before = [ "systemd-vconsole-setup.service" "display-manager.service" ];
-      requires = [ "sys-kernel-debug.mount" "vgaswitcheroo.path" ];
-      serviceConfig = {
-        Type = "oneshot";
-        RemainAfterExit = true;
-        ExecStart = "${pkgs.bash}/bin/sh -c 'echo -e \"IGD\\nOFF\" > /sys/kernel/debug/vgaswitcheroo/switch'";
-        ExecStop = "${pkgs.bash}/bin/sh -c 'echo ON >/sys/kernel/debug/vgaswitcheroo/switch'";
-      };
-    };
-    systemd.paths."vgaswitcheroo" = {
-      pathConfig = {
-        PathExists = "/sys/kernel/debug/vgaswitcheroo/switch";
-        Unit = "amd-hybrid-graphics.service";
-      };
-      wantedBy = ["multi-user.target"];
-    };
-  };
-
-}
diff --git a/nixos/modules/services/hardware/bluetooth.nix b/nixos/modules/services/hardware/bluetooth.nix
index 4a8cd86b0b11..d7ca8a431794 100644
--- a/nixos/modules/services/hardware/bluetooth.nix
+++ b/nixos/modules/services/hardware/bluetooth.nix
@@ -3,8 +3,8 @@
 with lib;
 
 let
-  bluez-bluetooth = pkgs.bluez;
   cfg = config.hardware.bluetooth;
+  bluez-bluetooth = cfg.package;
 
 in {
 
@@ -21,6 +21,16 @@ in {
         description = "Whether to power up the default Bluetooth controller on boot.";
       };
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.bluez;
+        defaultText = "pkgs.bluez";
+        example = "pkgs.bluez.override { enableMidi = true; }";
+        description = ''
+          Which BlueZ package to use.
+        '';
+      };
+
       extraConfig = mkOption {
         type = types.lines;
         default = "";
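
The new hardware.bluetooth.package option above allows swapping in an overridden BlueZ build. This sketch reuses the example value from the option declaration and assumes the module's existing enable option:

    { pkgs, ... }:
    {
      hardware.bluetooth = {
        enable = true;   # assumed: existing option of this module
        package = pkgs.bluez.override { enableMidi = true; };
      };
    }
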
diff --git a/nixos/modules/services/hardware/fwupd.nix b/nixos/modules/services/hardware/fwupd.nix
new file mode 100644
index 000000000000..d8abde2a600a
--- /dev/null
+++ b/nixos/modules/services/hardware/fwupd.nix
@@ -0,0 +1,90 @@
+# fwupd daemon.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.fwupd;
+  originalEtc =
+    let
+      mkEtcFile = n: nameValuePair n { source = "${pkgs.fwupd}/etc/${n}"; };
+    in listToAttrs (map mkEtcFile pkgs.fwupd.filesInstalledToEtc);
+  extraTrustedKeys =
+    let
+      mkName = p: "pki/fwupd/${baseNameOf (toString p)}";
+      mkEtcFile = p: nameValuePair (mkName p) { source = p; };
+    in listToAttrs (map mkEtcFile cfg.extraTrustedKeys);
+in {
+
+  ###### interface
+  options = {
+    services.fwupd = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable fwupd, a DBus service that allows
+          applications to update firmware.
+        '';
+      };
+
+      blacklistDevices = mkOption {
+        type = types.listOf types.string;
+        default = [];
+        example = [ "2082b5e0-7a64-478a-b1b2-e3404fab6dad" ];
+        description = ''
+          Allow blacklisting specific devices by their GUID.
+        '';
+      };
+
+      blacklistPlugins = mkOption {
+        type = types.listOf types.string;
+        default = [];
+        example = [ "udev" ];
+        description = ''
+          Allow blacklisting specific plugins.
+        '';
+      };
+
+      extraTrustedKeys = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        example = literalExample "[ /etc/nixos/fwupd/myfirmware.pem ]";
+        description = ''
+          Installing a public key allows firmware signed with a matching
+          private key to be recognized as trusted, which may require less
+          authentication to install than for untrusted files. By default
+          trusted firmware can be upgraded (but not downgraded) without the
+          user or administrator password. Only very few keys are installed
+          by default.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.fwupd ];
+
+    environment.etc = {
+      "fwupd/daemon.conf" = {
+        source = pkgs.writeText "daemon.conf" ''
+          [fwupd]
+          BlacklistDevices=${lib.concatStringsSep ";" cfg.blacklistDevices}
+          BlacklistPlugins=${lib.concatStringsSep ";" cfg.blacklistPlugins}
+        '';
+      };
+    } // originalEtc // extraTrustedKeys;
+
+    services.dbus.packages = [ pkgs.fwupd ];
+
+    services.udev.packages = [ pkgs.fwupd ];
+
+    systemd.packages = [ pkgs.fwupd ];
+
+    systemd.tmpfiles.rules = [
+      "d /var/lib/fwupd 0755 root root -"
+    ];
+  };
+
+  meta = {
+    maintainers = pkgs.fwupd.maintainers;
+  };
+}
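
A sketch of enabling the new fwupd module above; the GUID, plugin name, and key path are the illustrative values from the option declarations, not real firmware identifiers:

    {
      services.fwupd = {
        enable = true;
        blacklistDevices = [ "2082b5e0-7a64-478a-b1b2-e3404fab6dad" ];
        blacklistPlugins = [ "udev" ];
        extraTrustedKeys = [ /etc/nixos/fwupd/myfirmware.pem ];
      };
    }

The trusted keys end up under /etc/pki/fwupd/ via the extraTrustedKeys mapping above, and the two blacklist lists are joined with ';' into /etc/fwupd/daemon.conf.
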
diff --git a/nixos/modules/services/hardware/nvidia-optimus.nix b/nixos/modules/services/hardware/nvidia-optimus.nix
index 9fe4021c4247..eb1713baa140 100644
--- a/nixos/modules/services/hardware/nvidia-optimus.nix
+++ b/nixos/modules/services/hardware/nvidia-optimus.nix
@@ -23,7 +23,7 @@ let kernel = config.boot.kernelPackages; in
   ###### implementation
 
   config = lib.mkIf config.hardware.nvidiaOptimus.disable {
-    boot.blacklistedKernelModules = ["nouveau" "nvidia" "nvidiafb"];
+    boot.blacklistedKernelModules = ["nouveau" "nvidia" "nvidiafb" "nvidia-drm"];
     boot.kernelModules = [ "bbswitch" ];
     boot.extraModulePackages = [ kernel.bbswitch ];
 
diff --git a/nixos/modules/services/hardware/thinkfan.nix b/nixos/modules/services/hardware/thinkfan.nix
index 018e82e58a3d..5a898631e090 100644
--- a/nixos/modules/services/hardware/thinkfan.nix
+++ b/nixos/modules/services/hardware/thinkfan.nix
@@ -55,7 +55,7 @@ in {
       enable = mkOption {
         default = false;
         description = ''
-          Whether to enable thinkfan, fan controller for ibm/lenovo thinkpads.
+          Whether to enable thinkfan, fan controller for IBM/Lenovo ThinkPads.
         '';
       };
 
diff --git a/nixos/modules/services/hardware/trezord.nix b/nixos/modules/services/hardware/trezord.nix
index 38d0a3a1d752..fa0496114684 100644
--- a/nixos/modules/services/hardware/trezord.nix
+++ b/nixos/modules/services/hardware/trezord.nix
@@ -38,7 +38,7 @@ in {
       path = [];
       serviceConfig = {
         Type = "simple";
-        ExecStart = "${pkgs.trezord}/bin/trezord -f";
+        ExecStart = "${pkgs.trezord}/bin/trezord-go";
         User = "trezord";
       };
     };
diff --git a/nixos/modules/services/hardware/u2f.nix b/nixos/modules/services/hardware/u2f.nix
new file mode 100644
index 000000000000..bb4b2f05f890
--- /dev/null
+++ b/nixos/modules/services/hardware/u2f.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.hardware.u2f;
+in {
+  options = {
+    hardware.u2f = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable U2F hardware support.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.libu2f-host ];
+  };
+}
+
diff --git a/nixos/modules/services/hardware/udev.nix b/nixos/modules/services/hardware/udev.nix
index 9f42f9e59ad5..7bfc3bb64872 100644
--- a/nixos/modules/services/hardware/udev.nix
+++ b/nixos/modules/services/hardware/udev.nix
@@ -146,7 +146,7 @@ let
 
       echo "Generating hwdb database..."
       # hwdb --update doesn't return error code even on errors!
-      res="$(${udev}/bin/udevadm hwdb --update --root=$(pwd) 2>&1)"
+      res="$(${pkgs.buildPackages.udev}/bin/udevadm hwdb --update --root=$(pwd) 2>&1)"
       echo "$res"
       [ -z "$(echo "$res" | egrep '^Error')" ]
       mv etc/udev/hwdb.bin $out
diff --git a/nixos/modules/services/hardware/usbmuxd.nix b/nixos/modules/services/hardware/usbmuxd.nix
new file mode 100644
index 000000000000..7ebd49fa01c2
--- /dev/null
+++ b/nixos/modules/services/hardware/usbmuxd.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  defaultUserGroup = "usbmux";
+  apple = "05ac";
+
+  cfg = config.services.usbmuxd;
+
+in
+
+{
+  options.services.usbmuxd = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable the usbmuxd ("USB multiplexing daemon") service. This daemon is
+        in charge of multiplexing connections over USB to an iOS device. This is
+        needed for transferring data to and from iOS devices (see ifuse). It
+        may also enable plug-and-play tethering for iPhones.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = defaultUserGroup;
+      description = ''
+        The user usbmuxd should use to run after startup.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = defaultUserGroup;
+      description = ''
+        The group usbmuxd should use to run after startup.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    users.extraUsers = optional (cfg.user == defaultUserGroup) {
+      name = cfg.user;
+      description = "usbmuxd user";
+      group = cfg.group;
+    };
+
+    users.extraGroups = optional (cfg.group == defaultUserGroup) {
+      name = cfg.group;
+    };
+
+    # Give usbmuxd permission for Apple devices
+    services.udev.extraRules = ''
+      SUBSYSTEM=="usb", ATTR{idVendor}=="${apple}", GROUP="${cfg.group}"
+    '';
+
+    systemd.services.usbmuxd = {
+      description = "usbmuxd";
+      wantedBy = [ "multi-user.target" ];
+      unitConfig.Documentation = "man:usbmuxd(8)";
+      serviceConfig = {
+        # Trigger the udev rule manually so the device does not need to be
+        # replugged after enabling this option for the first time.
+        ExecStartPre = "${pkgs.libudev}/bin/udevadm trigger -s usb -a idVendor=${apple}";
+        ExecStart = "${pkgs.usbmuxd}/bin/usbmuxd -U ${cfg.user} -f";
+      };
+    };
+
+  };
+}
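
For the new usbmuxd module above, enabling the service is usually enough; the user and group options only need to be set when the daemon should run under an existing account instead of the module-created "usbmux" one:

    {
      services.usbmuxd.enable = true;

      # Hypothetical: reuse an existing account instead of the default
      # "usbmux" user/group (which the module then does not create).
      # services.usbmuxd.user = "ios";
      # services.usbmuxd.group = "ios";
    }
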
diff --git a/nixos/modules/services/logging/graylog.nix b/nixos/modules/services/logging/graylog.nix
index a0dc0d6d089d..95f31829882f 100644
--- a/nixos/modules/services/logging/graylog.nix
+++ b/nixos/modules/services/logging/graylog.nix
@@ -141,7 +141,7 @@ in
         JAVA_HOME = jre;
         GRAYLOG_CONF = "${confFile}";
       };
-      path = [ pkgs.openjdk8 pkgs.which pkgs.procps ];
+      path = [ pkgs.jre_headless pkgs.which pkgs.procps ];
       preStart = ''
         mkdir -p /var/lib/graylog -m 755
 
diff --git a/nixos/modules/services/logging/logcheck.nix b/nixos/modules/services/logging/logcheck.nix
index 2a8ac414720b..a4cab0c94cdc 100644
--- a/nixos/modules/services/logging/logcheck.nix
+++ b/nixos/modules/services/logging/logcheck.nix
@@ -8,7 +8,7 @@ let
   defaultRules = pkgs.runCommand "logcheck-default-rules" {} ''
                    cp -prd ${pkgs.logcheck}/etc/logcheck $out
                    chmod u+w $out
-                   rm $out/logcheck.*
+                   rm -r $out/logcheck.*
                  '';
 
   rulesDir = pkgs.symlinkJoin
diff --git a/nixos/modules/services/logging/logstash.nix b/nixos/modules/services/logging/logstash.nix
index b4abd2cd7e5e..28d89a7463ab 100644
--- a/nixos/modules/services/logging/logstash.nix
+++ b/nixos/modules/services/logging/logstash.nix
@@ -103,7 +103,7 @@ in
 
       listenAddress = mkOption {
         type = types.str;
-        default = "0.0.0.0";
+        default = "127.0.0.1";
         description = "Address on which to start webserver.";
       };
 
diff --git a/nixos/modules/services/mail/clamsmtp.nix b/nixos/modules/services/mail/clamsmtp.nix
new file mode 100644
index 000000000000..8f4f39aa7288
--- /dev/null
+++ b/nixos/modules/services/mail/clamsmtp.nix
@@ -0,0 +1,179 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.clamsmtp;
+  clamdSocket = "/run/clamav/clamd.ctl"; # See services/security/clamav.nix
+in
+{
+  ##### interface
+  options = {
+    services.clamsmtp = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable clamsmtp.";
+      };
+
+      instances = mkOption {
+        description = "Instances of clamsmtp to run.";
+        type = types.listOf (types.submodule { options = {
+          action = mkOption {
+            type = types.enum [ "bounce" "drop" "pass" ];
+            default = "drop";
+            description =
+              ''
+                Action to take when a virus is detected.
+
+                Note that viruses often spoof sender addresses, so bouncing is
+                in most cases not a good idea.
+              '';
+          };
+
+          header = mkOption {
+            type = types.str;
+            default = "";
+            example = "X-Virus-Scanned: ClamAV using ClamSMTP";
+            description =
+              ''
+                A header to add to scanned messages. See clamsmtpd.conf(5) for
+                more details. Empty means no header.
+              '';
+          };
+
+          keepAlives = mkOption {
+            type = types.int;
+            default = 0;
+            description =
+              ''
+                Number of seconds to wait between each NOOP sent to the sending
+                server. 0 to disable.
+
+                This is meant for slow servers where the sending MTA times out
+                waiting for clamd to scan the file.
+              '';
+          };
+
+          listen = mkOption {
+            type = types.str;
+            example = "127.0.0.1:10025";
+            description =
+              ''
+                Address to wait for incoming SMTP connections on. See
+                clamsmtpd.conf(5) for more details.
+              '';
+          };
+
+          quarantine = mkOption {
+            type = types.bool;
+            default = false;
+            description =
+              ''
+                Whether to quarantine files that contain viruses by leaving them
+                in the temporary directory.
+              '';
+          };
+
+          maxConnections = mkOption {
+            type = types.int;
+            default = 64;
+            description = "Maximum number of connections to accept at once.";
+          };
+
+          outAddress = mkOption {
+            type = types.str;
+            description =
+              ''
+                Address of the SMTP server to send email to once it has been
+                scanned.
+              '';
+          };
+
+          tempDirectory = mkOption {
+            type = types.str;
+            default = "/tmp";
+            description =
+              ''
+                Temporary directory that needs to be accessible to both clamd
+                and clamsmtpd.
+              '';
+          };
+
+          timeout = mkOption {
+            type = types.int;
+            default = 180;
+            description = "Time-out for network connections.";
+          };
+
+          transparentProxy = mkOption {
+            type = types.bool;
+            default = false;
+            description = "Enable clamsmtp's transparent proxy support.";
+          };
+
+          virusAction = mkOption {
+            type = with types; nullOr path;
+            default = null;
+            description =
+              ''
+                Command to run when a virus is found. Please see VIRUS ACTION in
+                clamsmtpd(8) for a discussion of this option and its safe use.
+              '';
+          };
+
+          xClient = mkOption {
+            type = types.bool;
+            default = false;
+            description =
+              ''
+                Send the XCLIENT command to the receiving server, for forwarding
+                client addresses and connection information if the receiving
+                server supports this feature.
+              '';
+          };
+        };});
+      };
+    };
+  };
+
+  ##### implementation
+  config = let
+    configfile = conf: pkgs.writeText "clamsmtpd.conf"
+      ''
+        Action: ${conf.action}
+        ClamAddress: ${clamdSocket}
+        Header: ${conf.header}
+        KeepAlives: ${toString conf.keepAlives}
+        Listen: ${conf.listen}
+        Quarantine: ${if conf.quarantine then "on" else "off"}
+        MaxConnections: ${toString conf.maxConnections}
+        OutAddress: ${conf.outAddress}
+        TempDirectory: ${conf.tempDirectory}
+        TimeOut: ${toString conf.timeout}
+        TransparentProxy: ${if conf.transparentProxy then "on" else "off"}
+        User: clamav
+        ${optionalString (conf.virusAction != null) "VirusAction: ${conf.virusAction}"}
+        XClient: ${if conf.xClient then "on" else "off"}
+      '';
+  in
+    mkIf cfg.enable {
+      assertions = [
+        { assertion = config.services.clamav.daemon.enable;
+          message = "clamsmtp requires clamav to be enabled";
+        }
+      ];
+
+      systemd.services = listToAttrs (imap1 (i: conf:
+        nameValuePair "clamsmtp-${toString i}" {
+          description = "ClamSMTP instance ${toString i}";
+          wantedBy = [ "multi-user.target" ];
+          script = "exec ${pkgs.clamsmtp}/bin/clamsmtpd -f ${configfile conf}";
+          after = [ "clamav-daemon.service" ];
+          requires = [ "clamav-daemon.service" ];
+          serviceConfig.Type = "forking";
+          serviceConfig.PrivateTmp = "yes";
+          unitConfig.JoinsNamespaceOf = "clamav-daemon.service";
+        }
+      ) cfg.instances);
+    };
+}
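
A sketch of a single clamsmtp instance using the options defined above; the listen and outAddress ports are hypothetical and must match whatever the local MTA is configured to use, and clamav must be enabled to satisfy the assertion:

    {
      services.clamav.daemon.enable = true;   # required by the assertion above

      services.clamsmtp = {
        enable = true;
        instances = [{
          listen = "127.0.0.1:10025";                          # where the MTA hands mail to clamsmtpd
          outAddress = "127.0.0.1:10026";                      # hypothetical re-injection port on the MTA
          header = "X-Virus-Scanned: ClamAV using ClamSMTP";
          action = "drop";
        }];
      };
    }
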
diff --git a/nixos/modules/services/mail/dkimproxy-out.nix b/nixos/modules/services/mail/dkimproxy-out.nix
new file mode 100644
index 000000000000..894b88e25c1b
--- /dev/null
+++ b/nixos/modules/services/mail/dkimproxy-out.nix
@@ -0,0 +1,118 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.dkimproxy-out;
+  keydir = "/var/lib/dkimproxy-out";
+  privkey = "${keydir}/private.key";
+  pubkey = "${keydir}/public.key";
+in
+{
+  ##### interface
+  options = {
+    services.dkimproxy-out = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            Whether to enable dkimproxy_out.
+
+            Note that a key will be auto-generated, and can be found in
+            ${keydir}.
+          '';
+      };
+
+      listen = mkOption {
+        type = types.str;
+        example = "127.0.0.1:10027";
+        description = "Address:port DKIMproxy should listen on.";
+      };
+
+      relay = mkOption {
+        type = types.str;
+        example = "127.0.0.1:10028";
+        description = "Address:port DKIMproxy should forward mail to.";
+      };
+
+      domains = mkOption {
+        type = with types; listOf str;
+        example = [ "example.org" "example.com" ];
+        description = "List of domains DKIMproxy can sign for.";
+      };
+
+      selector = mkOption {
+        type = types.str;
+        example = "selector1";
+        description =
+          ''
+            The selector to use for DKIM key identification.
+
+            For example, if 'selector1' is used here, then for each domain
+            'example.org' given in `domains`, 'selector1._domainkey.example.org'
+            should contain the TXT record indicating the public key is the one
+            in ${pubkey}: "v=DKIM1; t=s; p=[THE PUBLIC KEY]".
+          '';
+      };
+
+      keySize = mkOption {
+        type = types.int;
+        default = 2048;
+        description =
+          ''
+            Size of the RSA key used to sign outgoing emails. Note that 2048
+            bits is the largest key size that verifiers are required to
+            support per RFC 6376.
+          '';
+      };
+
+      # TODO: allow signature for other schemes than dkim(c=relaxed/relaxed)?
+      # This being the scheme used by gmail, maybe nothing more is needed for
+      # reasonable use.
+    };
+  };
+
+  ##### implementation
+  config = let
+    configfile = pkgs.writeText "dkimproxy_out.conf"
+      ''
+        listen ${cfg.listen}
+        relay ${cfg.relay}
+
+        domain ${concatStringsSep "," cfg.domains}
+        selector ${cfg.selector}
+
+        signature dkim(c=relaxed/relaxed)
+
+        keyfile ${privkey}
+      '';
+  in
+    mkIf cfg.enable {
+      users.groups.dkimproxy-out = {};
+      users.users.dkimproxy-out = {
+        description = "DKIMproxy_out daemon";
+        group = "dkimproxy-out";
+        isSystemUser = true;
+      };
+
+      systemd.services.dkimproxy-out = {
+        description = "DKIMproxy_out";
+        wantedBy = [ "multi-user.target" ];
+        preStart = ''
+          if [ ! -d "${keydir}" ]; then
+            mkdir -p "${keydir}"
+            chmod 0700 "${keydir}"
+            ${pkgs.openssl}/bin/openssl genrsa -out "${privkey}" ${toString cfg.keySize}
+            ${pkgs.openssl}/bin/openssl rsa -in "${privkey}" -pubout -out "${pubkey}"
+            chown -R dkimproxy-out:dkimproxy-out "${keydir}"
+          fi
+        '';
+        script = ''
+          exec ${pkgs.dkimproxy}/bin/dkimproxy.out --conf_file=${configfile}
+        '';
+        serviceConfig = {
+          User = "dkimproxy-out";
+          PermissionsStartOnly = true;
+        };
+      };
+    };
+}
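
A sketch of the new dkimproxy-out module above, reusing the example values from the option declarations; the listen and relay ports must match the local MTA's content-filter setup:

    {
      services.dkimproxy-out = {
        enable = true;
        listen = "127.0.0.1:10027";
        relay = "127.0.0.1:10028";
        domains = [ "example.org" "example.com" ];
        selector = "selector1";
      };
    }

After the first start, the auto-generated public key in /var/lib/dkimproxy-out/public.key should be published as the selector1._domainkey TXT record for each listed domain, as described in the selector option above.
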
diff --git a/nixos/modules/services/mail/dovecot.nix b/nixos/modules/services/mail/dovecot.nix
index 18101a312254..543e732127a5 100644
--- a/nixos/modules/services/mail/dovecot.nix
+++ b/nixos/modules/services/mail/dovecot.nix
@@ -30,6 +30,7 @@ let
 
     ''
       default_internal_user = ${cfg.user}
+      default_internal_group = ${cfg.group}
       ${optionalString (cfg.mailUser != null) "mail_uid = ${cfg.mailUser}"}
       ${optionalString (cfg.mailGroup != null) "mail_gid = ${cfg.mailGroup}"}
 
@@ -104,7 +105,7 @@ let
   };
 
   mailboxConfig = mailbox: ''
-    mailbox ${mailbox.name} {
+    mailbox "${mailbox.name}" {
       auto = ${toString mailbox.auto}
   '' + optionalString (mailbox.specialUse != null) ''
       special_use = \${toString mailbox.specialUse}
@@ -113,7 +114,7 @@ let
   mailboxes = { lib, pkgs, ... }: {
     options = {
       name = mkOption {
-        type = types.str;
+        type = types.strMatching ''[^"]+'';
         example = "Spam";
         description = "The name of the mailbox.";
       };
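
The dovecot change above quotes mailbox names in the generated configuration and restricts them to strings without double quotes, so names containing spaces now work. A sketch, assuming the submodule shown here is exposed as services.dovecot2.mailboxes and that the auto/specialUse values used below are accepted:

    {
      services.dovecot2.mailboxes = [
        { name = "Spam"; specialUse = "Junk"; auto = "create"; }   # assumed option values
        { name = "Archived Mail"; }                                # spaces are fine, double quotes are not
      ];
    }
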
diff --git a/nixos/modules/services/mail/postfix.nix b/nixos/modules/services/mail/postfix.nix
index 867c0ea6761c..5ab331ac067f 100644
--- a/nixos/modules/services/mail/postfix.nix
+++ b/nixos/modules/services/mail/postfix.nix
@@ -15,20 +15,18 @@ let
   haveVirtual = cfg.virtual != "";
 
   clientAccess =
-    if (cfg.dnsBlacklistOverrides != "")
-    then [ "check_client_access hash:/etc/postfix/client_access" ]
-    else [];
+    optional (cfg.dnsBlacklistOverrides != "")
+      "check_client_access hash:/etc/postfix/client_access";
 
   dnsBl =
-    if (cfg.dnsBlacklists != [])
-    then [ (concatStringsSep ", " (map (s: "reject_rbl_client " + s) cfg.dnsBlacklists)) ]
-    else [];
+    optionals (cfg.dnsBlacklists != [])
+      (map (s: "reject_rbl_client " + s) cfg.dnsBlacklists);
 
   clientRestrictions = concatStringsSep ", " (clientAccess ++ dnsBl);
 
   mainCf = let
     escape = replaceStrings ["$"] ["$$"];
-    mkList = items: "\n  " + concatStringsSep "\n  " items;
+    mkList = items: "\n  " + concatStringsSep ",\n  " items;
     mkVal = value:
       if isList value then mkList value
         else " " + (if value == true then "yes"
@@ -36,72 +34,9 @@ let
         else toString value);
     mkEntry = name: value: "${escape name} =${mkVal value}";
   in
-    concatStringsSep "\n" (mapAttrsToList mkEntry (recursiveUpdate defaultConf cfg.config))
+    concatStringsSep "\n" (mapAttrsToList mkEntry cfg.config)
       + "\n" + cfg.extraConfig;
 
-  defaultConf = {
-    compatibility_level  = "9999";
-    mail_owner           = user;
-    default_privs        = "nobody";
-
-    # NixOS specific locations
-    data_directory       = "/var/lib/postfix/data";
-    queue_directory      = "/var/lib/postfix/queue";
-
-    # Default location of everything in package
-    meta_directory       = "${pkgs.postfix}/etc/postfix";
-    command_directory    = "${pkgs.postfix}/bin";
-    sample_directory     = "/etc/postfix";
-    newaliases_path      = "${pkgs.postfix}/bin/newaliases";
-    mailq_path           = "${pkgs.postfix}/bin/mailq";
-    readme_directory     = false;
-    sendmail_path        = "${pkgs.postfix}/bin/sendmail";
-    daemon_directory     = "${pkgs.postfix}/libexec/postfix";
-    manpage_directory    = "${pkgs.postfix}/share/man";
-    html_directory       = "${pkgs.postfix}/share/postfix/doc/html";
-    shlib_directory      = false;
-    relayhost            = if cfg.relayHost == "" then "" else
-                             if cfg.lookupMX
-                             then "${cfg.relayHost}:${toString cfg.relayPort}"
-                             else "[${cfg.relayHost}]:${toString cfg.relayPort}";
-
-    mail_spool_directory = "/var/spool/mail/";
-    setgid_group         = setgidGroup;
-  }
-  // optionalAttrs config.networking.enableIPv6 { inet_protocols = "all"; }
-  // optionalAttrs (cfg.networks != null) { mynetworks = cfg.networks; }
-  // optionalAttrs (cfg.networksStyle != "") { mynetworks_style = cfg.networksStyle; }
-  // optionalAttrs (cfg.hostname != "") { myhostname = cfg.hostname; }
-  // optionalAttrs (cfg.domain != "") { mydomain = cfg.domain; }
-  // optionalAttrs (cfg.origin != "") { myorigin =  cfg.origin; }
-  // optionalAttrs (cfg.destination != null) { mydestination = cfg.destination; }
-  // optionalAttrs (cfg.relayDomains != null) { relay_domains = cfg.relayDomains; }
-  // optionalAttrs (cfg.recipientDelimiter != "") { recipient_delimiter = cfg.recipientDelimiter; }
-  // optionalAttrs haveAliases { alias_maps = "${cfg.aliasMapType}:/etc/postfix/aliases"; }
-  // optionalAttrs haveTransport { transport_maps = "hash:/etc/postfix/transport"; }
-  // optionalAttrs haveVirtual { virtual_alias_maps = "${cfg.virtualMapType}:/etc/postfix/virtual"; }
-  // optionalAttrs (cfg.dnsBlacklists != []) { smtpd_client_restrictions = clientRestrictions; }
-  // optionalAttrs cfg.useSrs {
-    sender_canonical_maps = "tcp:127.0.0.1:10001";
-    sender_canonical_classes = "envelope_sender";
-    recipient_canonical_maps = "tcp:127.0.0.1:10002";
-    recipient_canonical_classes= "envelope_recipient";
-  }
-  // optionalAttrs cfg.enableHeaderChecks { header_checks = "regexp:/etc/postfix/header_checks"; }
-  // optionalAttrs (cfg.sslCert != "") {
-    smtp_tls_CAfile = cfg.sslCACert;
-    smtp_tls_cert_file = cfg.sslCert;
-    smtp_tls_key_file = cfg.sslKey;
-
-    smtp_use_tls = true;
-
-    smtpd_tls_CAfile = cfg.sslCACert;
-    smtpd_tls_cert_file = cfg.sslCert;
-    smtpd_tls_key_file = cfg.sslKey;
-
-    smtpd_use_tls = true;
-  };
-
   masterCfOptions = { options, config, name, ... }: {
     options = {
       name = mkOption {
@@ -479,7 +414,10 @@ in
       postmasterAlias = mkOption {
         type = types.str;
         default = "root";
-        description = "Who should receive postmaster e-mail.";
+        description = "
+          Who should receive postmaster e-mail. Multiple values can be
+          specified by separating them with commas.
+        ";
       };
 
       rootAlias = mkOption {
@@ -487,6 +425,7 @@ in
         default = "";
         description = "
           Who should receive root e-mail. Blank for no redirection.
+          Multiple values can be specified by separating them with commas.
         ";
       };
 
@@ -507,7 +446,6 @@ in
 
       config = mkOption {
         type = with types; attrsOf (either bool (either str (listOf str)));
-        default = defaultConf;
         description = ''
           The main.cf configuration file as key value set.
         '';
@@ -749,6 +687,67 @@ in
           '';
         };
 
+      services.postfix.config = (mapAttrs (_: v: mkDefault v) {
+        compatibility_level  = "9999";
+        mail_owner           = cfg.user;
+        default_privs        = "nobody";
+
+        # NixOS specific locations
+        data_directory       = "/var/lib/postfix/data";
+        queue_directory      = "/var/lib/postfix/queue";
+
+        # Default location of everything in package
+        meta_directory       = "${pkgs.postfix}/etc/postfix";
+        command_directory    = "${pkgs.postfix}/bin";
+        sample_directory     = "/etc/postfix";
+        newaliases_path      = "${pkgs.postfix}/bin/newaliases";
+        mailq_path           = "${pkgs.postfix}/bin/mailq";
+        readme_directory     = false;
+        sendmail_path        = "${pkgs.postfix}/bin/sendmail";
+        daemon_directory     = "${pkgs.postfix}/libexec/postfix";
+        manpage_directory    = "${pkgs.postfix}/share/man";
+        html_directory       = "${pkgs.postfix}/share/postfix/doc/html";
+        shlib_directory      = false;
+        mail_spool_directory = "/var/spool/mail/";
+        setgid_group         = cfg.setgidGroup;
+      })
+      // optionalAttrs (cfg.relayHost != "") { relayhost = if cfg.lookupMX
+                                                           then "${cfg.relayHost}:${toString cfg.relayPort}"
+                                                           else "[${cfg.relayHost}]:${toString cfg.relayPort}"; }
+      // optionalAttrs config.networking.enableIPv6 { inet_protocols = mkDefault "all"; }
+      // optionalAttrs (cfg.networks != null) { mynetworks = cfg.networks; }
+      // optionalAttrs (cfg.networksStyle != "") { mynetworks_style = cfg.networksStyle; }
+      // optionalAttrs (cfg.hostname != "") { myhostname = cfg.hostname; }
+      // optionalAttrs (cfg.domain != "") { mydomain = cfg.domain; }
+      // optionalAttrs (cfg.origin != "") { myorigin =  cfg.origin; }
+      // optionalAttrs (cfg.destination != null) { mydestination = cfg.destination; }
+      // optionalAttrs (cfg.relayDomains != null) { relay_domains = cfg.relayDomains; }
+      // optionalAttrs (cfg.recipientDelimiter != "") { recipient_delimiter = cfg.recipientDelimiter; }
+      // optionalAttrs haveAliases { alias_maps = [ "${cfg.aliasMapType}:/etc/postfix/aliases" ]; }
+      // optionalAttrs haveTransport { transport_maps = [ "hash:/etc/postfix/transport" ]; }
+      // optionalAttrs haveVirtual { virtual_alias_maps = [ "${cfg.virtualMapType}:/etc/postfix/virtual" ]; }
+      // optionalAttrs (cfg.dnsBlacklists != []) { smtpd_client_restrictions = clientRestrictions; }
+      // optionalAttrs cfg.useSrs {
+        sender_canonical_maps = [ "tcp:127.0.0.1:10001" ];
+        sender_canonical_classes = [ "envelope_sender" ];
+        recipient_canonical_maps = [ "tcp:127.0.0.1:10002" ];
+        recipient_canonical_classes = [ "envelope_recipient" ];
+      }
+      // optionalAttrs cfg.enableHeaderChecks { header_checks = [ "regexp:/etc/postfix/header_checks" ]; }
+      // optionalAttrs (cfg.sslCert != "") {
+        smtp_tls_CAfile = cfg.sslCACert;
+        smtp_tls_cert_file = cfg.sslCert;
+        smtp_tls_key_file = cfg.sslKey;
+
+        smtp_use_tls = true;
+
+        smtpd_tls_CAfile = cfg.sslCACert;
+        smtpd_tls_cert_file = cfg.sslCert;
+        smtpd_tls_key_file = cfg.sslKey;
+
+        smtpd_use_tls = true;
+      };
+
       services.postfix.masterConfig = {
         smtp_inet = {
           name = "smtp";
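
Because the main.cf defaults above are now injected into services.postfix.config with mkDefault instead of being merged from a fixed defaultConf, individual keys can be overridden or added directly. A sketch; smtpd_recipient_restrictions is not set by the module and is only shown as a user-added, list-valued key:

    {
      services.postfix = {
        enable = true;   # assumed: existing option of this module
        config = {
          # Overrides the mkDefault value set above.
          mail_spool_directory = "/var/mail/";
          # List values are rendered one entry per line, comma-separated,
          # by the updated mkList above.
          smtpd_recipient_restrictions = [
            "permit_mynetworks"
            "reject_unauth_destination"
          ];
        };
      };
    }
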
diff --git a/nixos/modules/services/mail/rspamd.nix b/nixos/modules/services/mail/rspamd.nix
index 6d403e448e04..09fb587e74b5 100644
--- a/nixos/modules/services/mail/rspamd.nix
+++ b/nixos/modules/services/mail/rspamd.nix
@@ -1,14 +1,152 @@
-{ config, lib, pkgs, ... }:
+{ config, options, pkgs, lib, ... }:
 
 with lib;
 
 let
 
   cfg = config.services.rspamd;
+  opts = options.services.rspamd;
 
-  mkBindSockets = socks: concatStringsSep "\n" (map (each: "  bind_socket = \"${each}\"") socks);
+  bindSocketOpts = {options, config, ... }: {
+    options = {
+      socket = mkOption {
+        type = types.str;
+        example = "localhost:11333";
+        description = ''
+          Socket for this worker to listen on, in a format accepted by rspamd.
+        '';
+      };
+      mode = mkOption {
+        type = types.str;
+        default = "0644";
+        description = "Mode to set on unix socket";
+      };
+      owner = mkOption {
+        type = types.str;
+        default = "${cfg.user}";
+        description = "Owner to set on unix socket";
+      };
+      group = mkOption {
+        type = types.str;
+        default = "${cfg.group}";
+        description = "Group to set on unix socket";
+      };
+      rawEntry = mkOption {
+        type = types.str;
+        internal = true;
+      };
+    };
+    config.rawEntry = let
+      maybeOption = option:
+        optionalString options.${option}.isDefined " ${option}=${config.${option}}";
+    in
+      if (!(hasPrefix "/" config.socket)) then "${config.socket}"
+      else "${config.socket}${maybeOption "mode"}${maybeOption "owner"}${maybeOption "group"}";
+  };
 
-   rspamdConfFile = pkgs.writeText "rspamd.conf"
+  workerOpts = { name, ... }: {
+    options = {
+      enable = mkOption {
+        type = types.nullOr types.bool;
+        default = null;
+        description = "Whether to run the rspamd worker.";
+      };
+      name = mkOption {
+        type = types.nullOr types.str;
+        default = name;
+        description = "Name of the worker";
+      };
+      type = mkOption {
+        type = types.nullOr (types.enum [
+          "normal" "controller" "fuzzy_storage" "proxy" "lua"
+        ]);
+        description = "The type of this worker";
+      };
+      bindSockets = mkOption {
+        type = types.listOf (types.either types.str (types.submodule bindSocketOpts));
+        default = [];
+        description = ''
+          List of sockets to listen on, in a format accepted by rspamd.
+        '';
+        example = [{
+          socket = "/run/rspamd.sock";
+          mode = "0666";
+          owner = "rspamd";
+        } "*:11333"];
+        apply = value: map (each: if (isString each)
+          then if (isUnixSocket each)
+            then {socket = each; owner = cfg.user; group = cfg.group; mode = "0644"; rawEntry = "${each}";}
+            else {socket = each; rawEntry = "${each}";}
+          else each) value;
+      };
+      count = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = ''
+          Number of worker instances to run
+        '';
+      };
+      includes = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          List of files to include in the configuration.
+        '';
+      };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = "Additional entries to put verbatim into the worker section of the rspamd config file.";
+      };
+    };
+    config = mkIf (name == "normal" || name == "controller" || name == "fuzzy") {
+      type = mkDefault name;
+      includes = mkDefault [ "$CONFDIR/worker-${name}.inc" ];
+      bindSockets = mkDefault (if name == "normal"
+        then [{
+              socket = "/run/rspamd/rspamd.sock";
+              mode = "0660";
+              owner = cfg.user;
+              group = cfg.group;
+            }]
+        else if name == "controller"
+        then [ "localhost:11334" ]
+        else [] );
+    };
+  };
+
+  indexOf = default: start: list: e:
+    if list == []
+    then default
+    else if (head list) == e then start
+    else (indexOf default (start + (length (listenStreams (head list).socket))) (tail list) e);
+
+  systemdSocket = indexOf (abort "Socket not found") 0 allSockets;
+
+  isUnixSocket = socket: hasPrefix "/" (if (isString socket) then socket else socket.socket);
+  isPort = hasPrefix "*:";
+  isIPv4Socket = hasPrefix "*v4:";
+  isIPv6Socket = hasPrefix "*v6:";
+  isLocalHost = hasPrefix "localhost:";
+  listenStreams = socket:
+    if (isLocalHost socket) then
+      let port = (removePrefix "localhost:" socket);
+      in [ "127.0.0.1:${port}" ] ++ (if config.networking.enableIPv6 then ["[::1]:${port}"] else [])
+    else if (isIPv6Socket socket) then [removePrefix "*v6:" socket]
+    else if (isPort socket) then [removePrefix "*:" socket]
+    else if (isIPv4Socket socket) then
+      throw "error: IPv4 only socket not supported in rspamd with socket activation"
+    else if (length (splitString " " socket)) != 1 then
+      throw "error: string options not supported in rspamd with socket activation"
+    else [socket];
+
+  mkBindSockets = enabled: socks: concatStringsSep "\n  " (flatten (map (each:
+    if cfg.socketActivation && enabled != false then
+      let systemd = (systemdSocket each);
+      in (imap (idx: e: "bind_socket = \"systemd:${toString (systemd + idx - 1)}\";") (listenStreams each.socket))
+    else "bind_socket = \"${each.rawEntry}\";") socks));
+
+  rspamdConfFile = pkgs.writeText "rspamd.conf"
     ''
       .include "$CONFDIR/common.conf"
 
@@ -22,17 +160,33 @@ let
         .include "$CONFDIR/logging.inc"
       }
 
-      worker {
-      ${mkBindSockets cfg.bindSocket}
-        .include "$CONFDIR/worker-normal.inc"
-      }
+      ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
+        worker ${optionalString (value.name != "normal" && value.name != "controller") "${value.name}"} {
+          type = "${value.type}";
+          ${optionalString (value.enable != null)
+            "enabled = ${if value.enable != false then "yes" else "no"};"}
+          ${mkBindSockets value.enable value.bindSockets}
+          ${optionalString (value.count != null) "count = ${toString value.count};"}
+          ${concatStringsSep "\n  " (map (each: ".include \"${each}\"") value.includes)}
+          ${value.extraConfig}
+        }
+      '') cfg.workers)}
 
-      worker {
-      ${mkBindSockets cfg.bindUISocket}
-        .include "$CONFDIR/worker-controller.inc"
-      }
+      ${cfg.extraConfig}
    '';
 
+  allMappedSockets = flatten (mapAttrsToList (name: value:
+    if value.enable != false
+    then imap (idx: each: {
+        name = "${name}";
+        index = idx;
+        value = each;
+      }) value.bindSockets
+    else []) cfg.workers);
+  allSockets = map (e: e.value) allMappedSockets;
+
+  allSocketNames = map (each: "rspamd-${each.name}-${toString each.index}.socket") allMappedSockets;
+
 in
 
 {
@@ -46,36 +200,52 @@ in
       enable = mkEnableOption "Whether to run the rspamd daemon.";
 
       debug = mkOption {
+        type = types.bool;
         default = false;
         description = "Whether to run the rspamd daemon in debug mode.";
       };
 
-      bindSocket = mkOption {
-        type = types.listOf types.str;
-        default = [
-          "/run/rspamd/rspamd.sock mode=0660 owner=${cfg.user} group=${cfg.group}"
-        ];
-        defaultText = ''[
-          "/run/rspamd/rspamd.sock mode=0660 owner=${cfg.user} group=${cfg.group}"
-        ]'';
+      socketActivation = mkOption {
+        type = types.bool;
         description = ''
-          List of sockets to listen, in format acceptable by rspamd
+          Enable systemd socket activation for rspamd.
         '';
-        example = ''
-          bindSocket = [
-            "/run/rspamd.sock mode=0666 owner=rspamd"
-            "*:11333"
-          ];
+      };
+
+      workers = mkOption {
+        type = with types; attrsOf (submodule workerOpts);
+        description = ''
+          Attribute set of workers to start.
+        '';
+        default = {
+          normal = {};
+          controller = {};
+        };
+        example = literalExample ''
+          {
+            normal = {
+              includes = [ "$CONFDIR/worker-normal.inc" ];
+              bindSockets = [{
+                socket = "/run/rspamd/rspamd.sock";
+                mode = "0660";
+                owner = "${cfg.user}";
+                group = "${cfg.group}";
+              }];
+            };
+            controller = {
+              includes = [ "$CONFDIR/worker-controller.inc" ];
+              bindSockets = [ "[::1]:11334" ];
+            };
+          }
         '';
       };
 
-      bindUISocket = mkOption {
-        type = types.listOf types.str;
-        default = [
-          "localhost:11334"
-        ];
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
         description = ''
-          List of sockets for web interface, in format acceptable by rspamd
+          Extra configuration to add at the end of the rspamd configuration
+          file.
         '';
       };
 
@@ -102,6 +272,13 @@ in
 
   config = mkIf cfg.enable {
 
+    services.rspamd.socketActivation = mkDefault (!opts.bindSocket.isDefined && !opts.bindUISocket.isDefined);
+
+    assertions = [ {
+      assertion = !cfg.socketActivation || !(opts.bindSocket.isDefined || opts.bindUISocket.isDefined);
+      message = "Can't use socketActivation for rspamd when using renamed bind socket options";
+    } ];
+
     # Allow users to run 'rspamc' and 'rspamadm'.
     environment.systemPackages = [ pkgs.rspamd ];
 
@@ -117,17 +294,22 @@ in
       gid = config.ids.gids.rspamd;
     };
 
+    environment.etc."rspamd.conf".source = rspamdConfFile;
+
     systemd.services.rspamd = {
       description = "Rspamd Service";
 
-      wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" ];
+      wantedBy = mkIf (!cfg.socketActivation) [ "multi-user.target" ];
+      after = [ "network.target" ] ++
+       (if cfg.socketActivation then allSocketNames else []);
+      requires = mkIf cfg.socketActivation allSocketNames;
 
       serviceConfig = {
         ExecStart = "${pkgs.rspamd}/bin/rspamd ${optionalString cfg.debug "-d"} --user=${cfg.user} --group=${cfg.group} --pid=/run/rspamd.pid -c ${rspamdConfFile} -f";
         Restart = "always";
         RuntimeDirectory = "rspamd";
         PrivateTmp = true;
+        Sockets = mkIf cfg.socketActivation (concatStringsSep " " allSocketNames);
       };
 
       preStart = ''
@@ -135,5 +317,25 @@ in
         ${pkgs.coreutils}/bin/chown ${cfg.user}:${cfg.group} /var/lib/rspamd
       '';
     };
+    systemd.sockets = mkIf cfg.socketActivation
+      (listToAttrs (map (each: {
+        name = "rspamd-${each.name}-${toString each.index}";
+        value = {
+          description = "Rspamd socket ${toString each.index} for worker ${each.name}";
+          wantedBy = [ "sockets.target" ];
+          listenStreams = (listenStreams each.value.socket);
+          socketConfig = {
+            BindIPv6Only = mkIf (isIPv6Socket each.value.socket) "ipv6-only";
+            Service = "rspamd.service";
+            SocketUser = mkIf (isUnixSocket each.value.socket) each.value.owner;
+            SocketGroup = mkIf (isUnixSocket each.value.socket) each.value.group;
+            SocketMode = mkIf (isUnixSocket each.value.socket) each.value.mode;
+          };
+        };
+      }) allMappedSockets));
   };
+  imports = [
+    (mkRenamedOptionModule [ "services" "rspamd" "bindSocket" ] [ "services" "rspamd" "workers" "normal" "bindSockets" ])
+    (mkRenamedOptionModule [ "services" "rspamd" "bindUISocket" ] [ "services" "rspamd" "workers" "controller" "bindSockets" ])
+  ];
 }
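
A sketch of the reworked rspamd module above, mirroring the literalExample of the workers option. With bindSockets set this way and the old bindSocket/bindUISocket options unused, socketActivation defaults to true, so the listed sockets are created by systemd and handed to the daemon:

    {
      services.rspamd = {
        enable = true;
        workers.normal.bindSockets = [{
          socket = "/run/rspamd/rspamd.sock";
          mode = "0660";
          owner = "rspamd";   # assumed to match the module's default user
          group = "rspamd";   # assumed to match the module's default group
        }];
        workers.controller.bindSockets = [ "localhost:11334" ];
      };
    }
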
diff --git a/nixos/modules/services/misc/defaultUnicornConfig.rb b/nixos/modules/services/misc/defaultUnicornConfig.rb
index 84622622db70..0b58c59c7a51 100644
--- a/nixos/modules/services/misc/defaultUnicornConfig.rb
+++ b/nixos/modules/services/misc/defaultUnicornConfig.rb
@@ -1,205 +1,69 @@
-# The following was taken from github.com/crohr/syslogger and is BSD
-# licensed.
-require 'syslog'
-require 'logger'
-require 'thread'
+worker_processes 3
 
-class Syslogger
-
-  VERSION = "1.6.0"
-
-  attr_reader :level, :ident, :options, :facility, :max_octets
-  attr_accessor :formatter
-
-  MAPPING = {
-    Logger::DEBUG => Syslog::LOG_DEBUG,
-    Logger::INFO => Syslog::LOG_INFO,
-    Logger::WARN => Syslog::LOG_WARNING,
-    Logger::ERROR => Syslog::LOG_ERR,
-    Logger::FATAL => Syslog::LOG_CRIT,
-    Logger::UNKNOWN => Syslog::LOG_ALERT
-  }
-
-  #
-  # Initializes default options for the logger
-  # <tt>ident</tt>:: the name of your program [default=$0].
-  # <tt>options</tt>::  syslog options [default=<tt>Syslog::LOG_PID | Syslog::LOG_CONS</tt>].
-  #                     Correct values are:
-  #                       LOG_CONS    : writes the message on the console if an error occurs when sending the message;
-  #                       LOG_NDELAY  : no delay before sending the message;
-  #                       LOG_PERROR  : messages will also be written on STDERR;
-  #                       LOG_PID     : adds the process number to the message (just after the program name)
-  # <tt>facility</tt>:: the syslog facility [default=nil] Correct values include:
-  #                       Syslog::LOG_DAEMON
-  #                       Syslog::LOG_USER
-  #                       Syslog::LOG_SYSLOG
-  #                       Syslog::LOG_LOCAL2
-  #                       Syslog::LOG_NEWS
-  #                       etc.
-  #
-  # Usage:
-  #   logger = Syslogger.new("my_app", Syslog::LOG_PID | Syslog::LOG_CONS, Syslog::LOG_LOCAL0)
-  #   logger.level = Logger::INFO # use Logger levels
-  #   logger.warn "warning message"
-  #   logger.debug "debug message"
-  #
-  def initialize(ident = $0, options = Syslog::LOG_PID | Syslog::LOG_CONS, facility = nil)
-    @ident = ident
-    @options = options || (Syslog::LOG_PID | Syslog::LOG_CONS)
-    @facility = facility
-    @level = Logger::INFO
-    @mutex = Mutex.new
-    @formatter = Logger::Formatter.new
-  end
-
-  %w{debug info warn error fatal unknown}.each do |logger_method|
-    # Accepting *args as message could be nil.
-    #  Default params not supported in ruby 1.8.7
-    define_method logger_method.to_sym do |*args, &block|
-      return true if @level > Logger.const_get(logger_method.upcase)
-      message = args.first || block && block.call
-      add(Logger.const_get(logger_method.upcase), message)
-    end
-
-    unless logger_method == 'unknown'
-      define_method "#{logger_method}?".to_sym do
-        @level <= Logger.const_get(logger_method.upcase)
-      end
-    end
-  end
-
-  # Log a message at the Logger::INFO level. Useful for use with Rack::CommonLogger
-  def write(msg)
-    add(Logger::INFO, msg)
-  end
-
-  # Logs a message at the Logger::INFO level.
-  def <<(msg)
-    add(Logger::INFO, msg)
-  end
-
-  # Low level method to add a message.
-  # +severity+::  the level of the message. One of Logger::DEBUG, Logger::INFO, Logger::WARN, Logger::ERROR, Logger::FATAL, Logger::UNKNOWN
-  # +message+:: the message string.
-  #             If nil, the method will call the block and use the result as the message string.
-  #             If both are nil or no block is given, it will use the progname as per the behaviour of both the standard Ruby logger, and the Rails BufferedLogger.
-  # +progname+:: optionally, overwrite the program name that appears in the log message.
-  def add(severity, message = nil, progname = nil, &block)
-    if message.nil? && block.nil? && !progname.nil?
-      message, progname = progname, nil
-    end
-    progname ||= @ident
-
-    @mutex.synchronize do
-      Syslog.open(progname, @options, @facility) do |s|
-        s.mask = Syslog::LOG_UPTO(MAPPING[@level])
-        communication = clean(message || block && block.call)
-        if self.max_octets
-          buffer = "#{tags_text}"
-          communication.bytes do |byte|
-            buffer.concat(byte)
-            # if the last byte we added is potentially part of an escape, we'll go ahead and add another byte
-            if buffer.bytesize >= self.max_octets && !['%'.ord,'\\'.ord].include?(byte)
-              s.log(MAPPING[severity],buffer)
-              buffer = ""
-            end
-          end
-          s.log(MAPPING[severity],buffer) unless buffer.empty?
-        else
-          s.log(MAPPING[severity],"#{tags_text}#{communication}")
-        end
-      end
-    end
-  end
-
-  # Set the max octets of the messages written to the log
-  def max_octets=(max_octets)
-    @max_octets = max_octets
-  end
-
-  # Sets the minimum level for messages to be written in the log.
-  # +level+:: one of <tt>Logger::DEBUG</tt>, <tt>Logger::INFO</tt>, <tt>Logger::WARN</tt>, <tt>Logger::ERROR</tt>, <tt>Logger::FATAL</tt>, <tt>Logger::UNKNOWN</tt>
-  def level=(level)
-    level = Logger.const_get(level.to_s.upcase) if level.is_a?(Symbol)
-
-    unless level.is_a?(Fixnum)
-      raise ArgumentError.new("Invalid logger level `#{level.inspect}`")
-    end
-
-    @level = level
-  end
-
-  # Sets the ident string passed along to Syslog
-  def ident=(ident)
-    @ident = ident
-  end
-
-  # Tagging code borrowed from ActiveSupport gem
-  def tagged(*tags)
-    new_tags = push_tags(*tags)
-    yield self
-  ensure
-    pop_tags(new_tags.size)
-  end
-
-  def push_tags(*tags)
-    tags.flatten.reject{ |i| i.respond_to?(:empty?) ? i.empty? : !i }.tap do |new_tags|
-      current_tags.concat new_tags
-    end
-  end
-
-  def pop_tags(size = 1)
-    current_tags.pop size
-  end
-
-  def clear_tags!
-    current_tags.clear
-  end
-
-  protected
-
-  # Borrowed from SyslogLogger.
-  def clean(message)
-    message = message.to_s.dup
-    message.strip! # remove whitespace
-    message.gsub!(/\n/, '\\n') # escape newlines
-    message.gsub!(/%/, '%%') # syslog(3) freaks on % (printf)
-    message.gsub!(/\e\[[^m]*m/, '') # remove useless ansi color codes
-    message
-  end
-
-  private
-
-  def tags_text
-    tags = current_tags
-    if tags.any?
-      tags.collect { |tag| "[#{tag}] " }.join
-    end
-  end
-
-  def current_tags
-    Thread.current[:syslogger_tagged_logging_tags] ||= []
-  end
-end
+listen ENV["UNICORN_PATH"] + "/tmp/sockets/gitlab.socket", :backlog => 1024
+listen "/run/gitlab/gitlab.socket", :backlog => 1024
 
-worker_processes 2
 working_directory ENV["GITLAB_PATH"]
-pid ENV["UNICORN_PATH"] + "/tmp/pids/unicorn.pid"
 
-listen ENV["UNICORN_PATH"] + "/tmp/sockets/gitlab.socket", :backlog => 1024
+pid ENV["UNICORN_PATH"] + "/tmp/pids/unicorn.pid"
 
 timeout 60
 
-logger Syslogger.new
-
+# combine Ruby 2.0.0dev or REE with "preload_app true" for memory savings
+# http://rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
 preload_app true
-
 GC.respond_to?(:copy_on_write_friendly=) and
   GC.copy_on_write_friendly = true
 
 check_client_connection false
 
+before_fork do |server, worker|
+  # the following is highly recommended for Rails + "preload_app true"
+  # as there's no need for the master process to hold a connection
+  defined?(ActiveRecord::Base) and
+    ActiveRecord::Base.connection.disconnect!
+
+  # The following is only recommended for memory/DB-constrained
+  # installations.  It is not needed if your system can house
+  # twice as many worker_processes as you have configured.
+  #
+  # This allows a new master process to incrementally
+  # phase out the old master process with SIGTTOU to avoid a
+  # thundering herd (especially in the "preload_app false" case)
+  # when doing a transparent upgrade.  The last worker spawned
+  # will then kill off the old master process with a SIGQUIT.
+  old_pid = "#{server.config[:pid]}.oldbin"
+  if old_pid != server.pid
+    begin
+      sig = (worker.nr + 1) >= server.worker_processes ? :QUIT : :TTOU
+      Process.kill(sig, File.read(old_pid).to_i)
+    rescue Errno::ENOENT, Errno::ESRCH
+    end
+  end
+
+  # Throttle the master from forking too quickly by sleeping.  Due
+  # to the implementation of standard Unix signal handlers, this
+  # helps (but does not completely) prevent identical, repeated signals
+  # from being lost when the receiving process is busy.
+  # sleep 1
+end
+
 after_fork do |server, worker|
+  # per-process listener ports for debugging/admin/migrations
+  # addr = "127.0.0.1:#{9293 + worker.nr}"
+  # server.listen(addr, :tries => -1, :delay => 5, :tcp_nopush => true)
+
+  # the following is *required* for Rails + "preload_app true",
   defined?(ActiveRecord::Base) and
     ActiveRecord::Base.establish_connection
+
+  # reset prometheus client, this will cause any opened metrics files to be closed
+  defined?(::Prometheus::Client.reinitialize_on_pid_change) &&
+    Prometheus::Client.reinitialize_on_pid_change
+
+  # if preload_app is true, then you may also want to check and
+  # restart any other shared sockets/descriptors such as Memcached,
+  # and Redis.  TokyoCabinet file handles are safe to reuse
+  # between any number of forked children (assuming your kernel
+  # correctly implements pread()/pwrite() system calls)
 end
diff --git a/nixos/modules/services/misc/disnix.nix b/nixos/modules/services/misc/disnix.nix
index e96645c79c77..e4517c636e88 100644
--- a/nixos/modules/services/misc/disnix.nix
+++ b/nixos/modules/services/misc/disnix.nix
@@ -32,11 +32,17 @@ in
         description = "Whether to enable Disnix";
       };
 
+      enableMultiUser = mkOption {
+        type = types.bool;
+        default = true;
+        description = "Whether to support multi-user mode by enabling the Disnix D-Bus service";
+      };
+
       useWebServiceInterface = mkOption {
         default = false;
         description = "Whether to enable the DisnixWebService interface running on Apache Tomcat";
       };
-      
+
       package = mkOption {
         type = types.path;
         description = "The Disnix package";
@@ -51,8 +57,8 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
-    dysnomia.enable = true;
-    
+    services.dysnomia.enable = true;
+
     environment.systemPackages = [ pkgs.disnix ] ++ optional cfg.useWebServiceInterface pkgs.DisnixWebService;
 
     services.dbus.enable = true;
@@ -71,7 +77,7 @@ in
       };
 
     systemd.services = {
-      disnix = {
+      disnix = mkIf cfg.enableMultiUser {
         description = "Disnix server";
         wants = [ "dysnomia.target" ];
         wantedBy = [ "multi-user.target" ];
@@ -92,7 +98,7 @@ in
         }
         // (if config.environment.variables ? DYSNOMIA_CONTAINERS_PATH then { inherit (config.environment.variables) DYSNOMIA_CONTAINERS_PATH; } else {})
         // (if config.environment.variables ? DYSNOMIA_MODULES_PATH then { inherit (config.environment.variables) DYSNOMIA_MODULES_PATH; } else {});
-        
+
         serviceConfig.ExecStart = "${cfg.package}/bin/disnix-service";
       };
 
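The disnix change above introduces services.disnix.enableMultiUser and routes the Dysnomia dependency through services.dysnomia.enable. A minimal sketch of a configuration fragment using the new flag, assuming the module's options live under services.disnix as the cfg references suggest:

  { ... }:
  {
    services.disnix.enable = true;
    # The new toggle; disable it when deployments only ever run as root
    # and the Disnix D-Bus service is not needed.
    services.disnix.enableMultiUser = false;
  }
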
diff --git a/nixos/modules/services/misc/dysnomia.nix b/nixos/modules/services/misc/dysnomia.nix
index df44d0a54866..9e66e0811ab7 100644
--- a/nixos/modules/services/misc/dysnomia.nix
+++ b/nixos/modules/services/misc/dysnomia.nix
@@ -3,8 +3,8 @@
 with lib;
 
 let
-  cfg = config.dysnomia;
-  
+  cfg = config.services.dysnomia;
+
   printProperties = properties:
     concatMapStrings (propertyName:
       let
@@ -13,7 +13,7 @@ let
       if isList property then "${propertyName}=(${lib.concatMapStrings (elem: "\"${toString elem}\" ") (properties."${propertyName}")})\n"
       else "${propertyName}=\"${toString property}\"\n"
     ) (builtins.attrNames properties);
-  
+
   properties = pkgs.stdenv.mkDerivation {
     name = "dysnomia-properties";
     buildCommand = ''
@@ -22,13 +22,13 @@ let
       EOF
     '';
   };
-  
+
   containersDir = pkgs.stdenv.mkDerivation {
     name = "dysnomia-containers";
     buildCommand = ''
       mkdir -p $out
       cd $out
-      
+
       ${concatMapStrings (containerName:
         let
           containerProperties = cfg.containers."${containerName}";
@@ -42,11 +42,11 @@ let
       ) (builtins.attrNames cfg.containers)}
     '';
   };
-  
+
   linkMutableComponents = {containerName}:
     ''
       mkdir ${containerName}
-      
+
       ${concatMapStrings (componentName:
         let
           component = cfg.components."${containerName}"."${componentName}";
@@ -54,13 +54,13 @@ let
         "ln -s ${component} ${containerName}/${componentName}\n"
       ) (builtins.attrNames (cfg.components."${containerName}" or {}))}
     '';
-  
+
   componentsDir = pkgs.stdenv.mkDerivation {
     name = "dysnomia-components";
     buildCommand = ''
       mkdir -p $out
       cd $out
-      
+
       ${concatMapStrings (containerName:
         let
           components = cfg.components."${containerName}";
@@ -72,59 +72,59 @@ let
 in
 {
   options = {
-    dysnomia = {
-      
+    services.dysnomia = {
+
       enable = mkOption {
         type = types.bool;
         default = false;
         description = "Whether to enable Dysnomia";
       };
-      
+
       enableAuthentication = mkOption {
         type = types.bool;
         default = false;
         description = "Whether to publish privacy-sensitive authentication credentials";
       };
-      
+
       package = mkOption {
         type = types.path;
         description = "The Dysnomia package";
       };
-      
+
       properties = mkOption {
         description = "An attribute set in which each attribute represents a machine property. Optionally, these values can be shell substitutions.";
         default = {};
       };
-      
+
       containers = mkOption {
         description = "An attribute set in which each key represents a container and each value an attribute set providing its configuration properties";
         default = {};
       };
-      
+
       components = mkOption {
         description = "An atttribute set in which each key represents a container and each value an attribute set in which each key represents a component and each value a derivation constructing its initial state";
         default = {};
       };
-      
+
       extraContainerProperties = mkOption {
         description = "An attribute set providing additional container settings in addition to the default properties";
         default = {};
       };
-      
+
       extraContainerPaths = mkOption {
         description = "A list of paths containing additional container configurations that are added to the search folders";
         default = [];
       };
-      
+
       extraModulePaths = mkOption {
         description = "A list of paths containing additional modules that are added to the search folders";
         default = [];
       };
     };
   };
-  
+
   config = mkIf cfg.enable {
-  
+
     environment.etc = {
       "dysnomia/containers" = {
         source = containersDir;
@@ -136,16 +136,16 @@ in
         source = properties;
       };
     };
-    
+
     environment.variables = {
       DYSNOMIA_STATEDIR = "/var/state/dysnomia-nixos";
       DYSNOMIA_CONTAINERS_PATH = "${lib.concatMapStrings (containerPath: "${containerPath}:") cfg.extraContainerPaths}/etc/dysnomia/containers";
       DYSNOMIA_MODULES_PATH = "${lib.concatMapStrings (modulePath: "${modulePath}:") cfg.extraModulePaths}/etc/dysnomia/modules";
     };
-    
+
     environment.systemPackages = [ cfg.package ];
-    
-    dysnomia.package = pkgs.dysnomia.override (origArgs: {
+
+    services.dysnomia.package = pkgs.dysnomia.override (origArgs: {
       enableApacheWebApplication = config.services.httpd.enable;
       enableAxis2WebService = config.services.tomcat.axis2.enable;
       enableEjabberdDump = config.services.ejabberd.enable;
@@ -155,10 +155,10 @@ in
       enableTomcatWebApplication = config.services.tomcat.enable;
       enableMongoDatabase = config.services.mongodb.enable;
     });
-    
-    dysnomia.properties = {
+
+    services.dysnomia.properties = {
       hostname = config.networking.hostName;
-      system = if config.nixpkgs.system == "" then builtins.currentSystem else config.nixpkgs.system;
+      inherit (config.nixpkgs.localSystem) system;
 
       supportedTypes = (import "${pkgs.stdenv.mkDerivation {
         name = "supportedtypes";
@@ -173,8 +173,8 @@ in
         '';
       }}");
     };
-    
-    dysnomia.containers = lib.recursiveUpdate ({
+
+    services.dysnomia.containers = lib.recursiveUpdate ({
       process = {};
       wrapper = {};
     }
@@ -192,9 +192,11 @@ in
         mysqlPassword = builtins.readFile (config.services.mysql.rootPassword);
       };
     }
-    // lib.optionalAttrs (config.services.postgresql.enable && cfg.enableAuthentication) { postgresql-database = {
-      postgresqlUsername = "root";
-    }; }
+    // lib.optionalAttrs (config.services.postgresql.enable) { postgresql-database = {
+      } // lib.optionalAttrs (cfg.enableAuthentication) {
+        postgresqlUsername = "postgres";
+      };
+    }
     // lib.optionalAttrs (config.services.tomcat.enable) { tomcat-webapplication = {
       tomcatPort = 8080;
     }; }
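Since the dysnomia options move to the services.dysnomia namespace, existing configurations need the new prefix. A migrated sketch; the "datacenter" property is purely illustrative:

  { ... }:
  {
    services.dysnomia.enable = true;
    # Properties end up in /etc/dysnomia/properties; values may be
    # shell substitutions.
    services.dysnomia.properties = {
      datacenter = "eu-west";
    };
  }
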
diff --git a/nixos/modules/services/misc/folding-at-home.nix b/nixos/modules/services/misc/folding-at-home.nix
index 053e7e95635f..164221cbab7f 100644
--- a/nixos/modules/services/misc/folding-at-home.nix
+++ b/nixos/modules/services/misc/folding-at-home.nix
@@ -57,7 +57,7 @@ in {
         chown ${fahUser} ${stateDir}
         cp -f ${pkgs.writeText "client.cfg" cfg.config} ${stateDir}/client.cfg
       '';
-      script = "${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${fahUser} -c 'cd ${stateDir}; ${pkgs.foldingathome}/bin/fah6'";
+      script = "${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${fahUser} -c 'cd ${stateDir}; ${pkgs.foldingathome}/bin/fah6'";
     };
 
     services.foldingAtHome.config = ''
diff --git a/nixos/modules/services/misc/geoip-updater.nix b/nixos/modules/services/misc/geoip-updater.nix
index 760fa66e80d6..e0b9df96f8e8 100644
--- a/nixos/modules/services/misc/geoip-updater.nix
+++ b/nixos/modules/services/misc/geoip-updater.nix
@@ -14,7 +14,7 @@ let
   # ExecStart= command with '@' doesn't work because we start a shell (new
   # process) that creates a new argv[0].)
   geoip-updater = pkgs.writeScriptBin "geoip-updater" ''
-    #!${pkgs.stdenv.shell}
+    #!${pkgs.runtimeShell}
     skipExisting=0
     debug()
     {
diff --git a/nixos/modules/services/misc/gitea.nix b/nixos/modules/services/misc/gitea.nix
index f0b44b7bedeb..63e976ae566c 100644
--- a/nixos/modules/services/misc/gitea.nix
+++ b/nixos/modules/services/misc/gitea.nix
@@ -4,6 +4,8 @@ with lib;
 
 let
   cfg = config.services.gitea;
+  pg = config.services.postgresql;
+  usePostgresql = cfg.database.type == "postgres";
   configFile = pkgs.writeText "app.ini" ''
     APP_NAME = ${cfg.appName}
     RUN_USER = ${cfg.user}
@@ -16,6 +18,9 @@ let
     USER = ${cfg.database.user}
     PASSWD = #dbpass#
     PATH = ${cfg.database.path}
+    ${optionalString usePostgresql ''
+      SSL_MODE = disable
+    ''}
 
     [repository]
     ROOT = ${cfg.repositoryRoot}
@@ -35,6 +40,10 @@ let
     SECRET_KEY = #secretkey#
     INSTALL_LOCK = true
 
+    [log]
+    ROOT_PATH = ${cfg.log.rootPath}
+    LEVEL = ${cfg.log.level}
+
     ${cfg.extraConfig}
   '';
 in
@@ -60,6 +69,19 @@ in
         description = "gitea data directory.";
       };
 
+      log = {
+        rootPath = mkOption {
+          default = "${cfg.stateDir}/log";
+          type = types.str;
+          description = "Root path for log files.";
+        };
+        level = mkOption {
+          default = "Trace";
+          type = types.enum [ "Trace" "Debug" "Info" "Warn" "Error" "Critical" ];
+          description = "General log level.";
+        };
+      };
+
       user = mkOption {
         type = types.str;
         default = "gitea";
@@ -82,7 +104,7 @@ in
 
         port = mkOption {
           type = types.int;
-          default = 3306;
+          default = (if !usePostgresql then 3306 else pg.port);
           description = "Database host port.";
         };
 
@@ -123,6 +145,15 @@ in
           default = "${cfg.stateDir}/data/gitea.db";
           description = "Path to the sqlite3 database file.";
         };
+
+        createDatabase = mkOption {
+          type = types.bool;
+          default = true;
+          description = ''
+            Whether to create a local postgresql database automatically.
+            This only applies if database type "postgres" is selected.
+          '';
+        };
       };
 
       appName = mkOption {
@@ -186,10 +217,11 @@ in
   };
 
   config = mkIf cfg.enable {
+    services.postgresql.enable = mkIf usePostgresql (mkDefault true);
 
     systemd.services.gitea = {
       description = "gitea";
-      after = [ "network.target" ];
+      after = [ "network.target" "postgresql.service" ];
       wantedBy = [ "multi-user.target" ];
       path = [ pkgs.gitea.bin ];
 
@@ -231,12 +263,31 @@ in
           mkdir -p ${cfg.stateDir}/conf
           cp -r ${pkgs.gitea.out}/locale ${cfg.stateDir}/conf/locale
         fi
+      '' + optionalString (usePostgresql && cfg.database.createDatabase) ''
+        if ! test -e "${cfg.stateDir}/db-created"; then
+          echo "CREATE ROLE ${cfg.database.user}
+                  WITH ENCRYPTED PASSWORD '$(head -n1 ${cfg.database.passwordFile})'
+                  NOCREATEDB NOCREATEROLE LOGIN"   |
+            ${pkgs.sudo}/bin/sudo -u ${pg.superUser} ${pg.package}/bin/psql
+          ${pkgs.sudo}/bin/sudo -u ${pg.superUser} \
+            ${pg.package}/bin/createdb             \
+            --owner=${cfg.database.user}           \
+            --encoding=UTF8                        \
+            --lc-collate=C                         \
+            --lc-ctype=C                           \
+            --template=template0                   \
+            ${cfg.database.name}
+          touch "${cfg.stateDir}/db-created"
+        fi
+      '' + ''
+        chown ${cfg.user} -R ${cfg.stateDir}
       '';
 
       serviceConfig = {
         Type = "simple";
         User = cfg.user;
         WorkingDirectory = cfg.stateDir;
+        PermissionsStartOnly = true;
         ExecStart = "${pkgs.gitea.bin}/bin/gitea web";
         Restart = "always";
       };
@@ -253,6 +304,7 @@ in
         description = "Gitea Service";
         home = cfg.stateDir;
         createHome = true;
+        useDefaultShell = true;
       };
     };
 
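The gitea changes wire the module to a local PostgreSQL instance and expose logging settings. A configuration sketch exercising them; the enable option and the password file path are assumptions not shown in this hunk:

  { ... }:
  {
    services.gitea = {
      enable = true;
      database = {
        type = "postgres";   # enables services.postgresql by default
        # createDatabase defaults to true, so the role and database are
        # provisioned in preStart; this path is a placeholder.
        passwordFile = "/var/lib/gitea/db-password";
      };
      log.level = "Info";
    };
  }
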
diff --git a/nixos/modules/services/misc/gitit.nix b/nixos/modules/services/misc/gitit.nix
index 44880ebeda14..94a98e0335df 100644
--- a/nixos/modules/services/misc/gitit.nix
+++ b/nixos/modules/services/misc/gitit.nix
@@ -17,7 +17,7 @@ let
   gititSh = hsPkgs: extras: with pkgs; let
     env = gititWithPkgs hsPkgs extras;
   in writeScript "gitit" ''
-    #!${stdenv.shell}
+    #!${runtimeShell}
     cd $HOME
     export NIX_GHC="${env}/bin/ghc"
     export NIX_GHCPKG="${env}/bin/ghc-pkg"
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index 7b2b40e59232..20d7ec90dcc9 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -29,8 +29,12 @@ let
 
   gitalyToml = pkgs.writeText "gitaly.toml" ''
     socket_path = "${lib.escape ["\""] gitalySocket}"
+    bin_dir = "${cfg.packages.gitaly}/bin"
     prometheus_listen_addr = "localhost:9236"
 
+    [git]
+    bin_path = "${pkgs.git}/bin/git"
+
     [gitaly-ruby]
     dir = "${cfg.packages.gitaly.ruby}"
 
@@ -70,7 +74,7 @@ let
       secret_key_base: ${cfg.secrets.secret}
       otp_key_base: ${cfg.secrets.otp}
       db_key_base: ${cfg.secrets.db}
-      jws_private_key: ${builtins.toJSON cfg.secrets.jws}
+      openid_connect_signing_key: ${builtins.toJSON cfg.secrets.jws}
   '';
 
   gitlabConfig = {
@@ -104,6 +108,7 @@ let
       ldap.enabled = false;
       omniauth.enabled = false;
       shared.path = "${cfg.statePath}/shared";
+      gitaly.client_path = "${cfg.packages.gitaly}/bin";
       backup.path = "${cfg.backupPath}";
       gitlab_shell = {
         path = "${cfg.packages.gitlab-shell}";
@@ -117,8 +122,6 @@ let
       };
       git = {
         bin_path = "git";
-        max_size = 20971520; # 20MB
-        timeout = 10;
       };
       monitoring = {
         ip_whitelist = [ "127.0.0.0/8" "::1/128" ];
@@ -140,6 +143,7 @@ let
     GITLAB_PATH = "${cfg.packages.gitlab}/share/gitlab/";
     GITLAB_STATE_PATH = "${cfg.statePath}";
     GITLAB_UPLOADS_PATH = "${cfg.statePath}/uploads";
+    SCHEMA = "${cfg.statePath}/db/schema.rb";
     GITLAB_LOG_PATH = "${cfg.statePath}/log";
     GITLAB_SHELL_PATH = "${cfg.packages.gitlab-shell}";
     GITLAB_SHELL_CONFIG_PATH = "${cfg.statePath}/shell/config.yml";
@@ -248,7 +252,6 @@ in {
 
       databasePassword = mkOption {
         type = types.str;
-        default = "";
         description = "Gitlab database user password.";
       };
 
@@ -440,12 +443,6 @@ in {
 
     environment.systemPackages = [ pkgs.git gitlab-rake cfg.packages.gitlab-shell ];
 
-    assertions = [
-      { assertion = cfg.databasePassword != "";
-        message = "databasePassword must be set";
-      }
-    ];
-
     # Redis is required for the sidekiq queue runner.
     services.redis.enable = mkDefault true;
     # We use postgres as the main data store.
@@ -496,13 +493,15 @@ in {
       after = [ "network.target" "gitlab.service" ];
       wantedBy = [ "multi-user.target" ];
       environment.HOME = gitlabEnv.HOME;
-      path = with pkgs; [ gitAndTools.git cfg.packages.gitaly.rubyEnv ];
+      environment.GEM_HOME = "${cfg.packages.gitaly.rubyEnv}/${ruby.gemPath}";
+      environment.GITLAB_SHELL_CONFIG_PATH = gitlabEnv.GITLAB_SHELL_CONFIG_PATH;
+      path = with pkgs; [ gitAndTools.git cfg.packages.gitaly.rubyEnv ruby ];
       serviceConfig = {
         #PermissionsStartOnly = true; # preStart must be run as root
         Type = "simple";
         User = cfg.user;
         Group = cfg.group;
-        TimeoutSec = "300";
+        TimeoutSec = "infinity";
         Restart = "on-failure";
         WorkingDirectory = gitlabEnv.HOME;
         ExecStart = "${cfg.packages.gitaly}/bin/gitaly ${gitalyToml}";
@@ -568,6 +567,7 @@ in {
         mkdir -p ${cfg.statePath}/tmp/pids
         mkdir -p ${cfg.statePath}/tmp/sockets
         mkdir -p ${cfg.statePath}/shell
+        mkdir -p ${cfg.statePath}/db
 
         rm -rf ${cfg.statePath}/config ${cfg.statePath}/shell/hooks
         mkdir -p ${cfg.statePath}/config
@@ -582,6 +582,7 @@ in {
         ln -sf ${cfg.statePath}/log /run/gitlab/log
         ln -sf ${cfg.statePath}/uploads /run/gitlab/uploads
         ln -sf ${cfg.statePath}/tmp /run/gitlab/tmp
+        ln -sf $GITLAB_SHELL_CONFIG_PATH /run/gitlab/shell-config.yml
         chown -R ${cfg.user}:${cfg.group} /run/gitlab
 
         # Prepare home directory
@@ -589,6 +590,7 @@ in {
         touch ${gitlabEnv.HOME}/.ssh/authorized_keys
         chown -R ${cfg.user}:${cfg.group} ${gitlabEnv.HOME}/
 
+        cp -rf ${cfg.packages.gitlab}/share/gitlab/db/* ${cfg.statePath}/db
         cp -rf ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
         ${optionalString cfg.smtp.enable ''
           ln -sf ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
diff --git a/nixos/modules/services/misc/gitolite.nix b/nixos/modules/services/misc/gitolite.nix
index f395b9558b5a..6e60316d000c 100644
--- a/nixos/modules/services/misc/gitolite.nix
+++ b/nixos/modules/services/misc/gitolite.nix
@@ -207,7 +207,7 @@ in
             gitolite setup -pk ${pubkeyFile}
           fi
           if [ -n "${hooks}" ]; then
-            cp ${hooks} .gitolite/hooks/common/
+            cp -f ${hooks} .gitolite/hooks/common/
             chmod +x .gitolite/hooks/common/*
           fi
           gitolite setup # Upgrade if needed
diff --git a/nixos/modules/services/misc/gitweb.nix b/nixos/modules/services/misc/gitweb.nix
new file mode 100644
index 000000000000..ca21366b7796
--- /dev/null
+++ b/nixos/modules/services/misc/gitweb.nix
@@ -0,0 +1,59 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gitweb;
+
+in
+{
+
+  options.services.gitweb = {
+
+    projectroot = mkOption {
+      default = "/srv/git";
+      type = types.path;
+      description = ''
+        Path to git projects (bare repositories) that should be served by
+        gitweb. Must not end with a slash.
+      '';
+    };
+
+    extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      description = ''
+        Verbatim configuration text appended to the generated gitweb.conf file.
+      '';
+      example = ''
+        $feature{'highlight'}{'default'} = [1];
+        $feature{'ctags'}{'default'} = [1];
+        $feature{'avatar'}{'default'} = ['gravatar'];
+      '';
+    };
+
+    gitwebTheme = mkOption {
+      default = false;
+      type = types.bool;
+      description = ''
+        Use an alternative theme for gitweb, strongly inspired by GitHub.
+      '';
+    };
+
+    gitwebConfigFile = mkOption {
+      default = pkgs.writeText "gitweb.conf" ''
+        # path to git projects (<project>.git)
+        $projectroot = "${cfg.projectroot}";
+        $highlight_bin = "${pkgs.highlight}/bin/highlight";
+        ${cfg.extraConfig}
+      '';
+      type = types.path;
+      readOnly = true;
+      internal = true;
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ gnidorah ];
+
+}
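The new gitweb module only generates gitweb.conf and records where the repositories live; serving it through a web server is left to other modules. A sketch of the option side, with the project root path illustrative:

  { ... }:
  {
    services.gitweb = {
      projectroot = "/srv/git";
      gitwebTheme = true;
      extraConfig = ''
        $feature{'highlight'}{'default'} = [1];
      '';
    };
  }
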
diff --git a/nixos/modules/services/misc/gogs.nix b/nixos/modules/services/misc/gogs.nix
index f6d326e43d94..ba744d37e71c 100644
--- a/nixos/modules/services/misc/gogs.nix
+++ b/nixos/modules/services/misc/gogs.nix
@@ -35,6 +35,9 @@ let
     SECRET_KEY = #secretkey#
     INSTALL_LOCK = true
 
+    [log]
+    ROOT_PATH = ${cfg.stateDir}/log
+
     ${cfg.extraConfig}
   '';
 in
diff --git a/nixos/modules/services/misc/gollum.nix b/nixos/modules/services/misc/gollum.nix
index 81fb024f9094..0888221ab62f 100644
--- a/nixos/modules/services/misc/gollum.nix
+++ b/nixos/modules/services/misc/gollum.nix
@@ -32,6 +32,24 @@ in
       description = "Content of the configuration file";
     };
 
+    mathjax = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Enable support for math rendering using MathJax";
+    };
+
+    allowUploads = mkOption {
+      type = types.nullOr (types.enum [ "dir" "page" ]);
+      default = null;
+      description = "Enable uploads of external files";
+    };
+
+    emoji = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Parse and interpret emoji tags";
+    };
+
     branch = mkOption {
       type = types.str;
       default = "master";
@@ -84,6 +102,9 @@ in
             --host ${cfg.address} \
             --config ${builtins.toFile "gollum-config.rb" cfg.extraConfig} \
             --ref ${cfg.branch} \
+            ${optionalString cfg.mathjax "--mathjax"} \
+            ${optionalString cfg.emoji "--emoji"} \
+            ${optionalString (cfg.allowUploads != null) "--allow-uploads ${cfg.allowUploads}"} \
             ${cfg.stateDir}
         '';
       };
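The three new gollum switches map directly onto the --mathjax, --emoji and --allow-uploads flags added to ExecStart, for example:

  { ... }:
  {
    services.gollum = {
      enable = true;
      mathjax = true;
      emoji = true;
      allowUploads = "page";   # or "dir"; the default null disables uploads
    };
  }
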
diff --git a/nixos/modules/services/misc/home-assistant.nix b/nixos/modules/services/misc/home-assistant.nix
new file mode 100644
index 000000000000..ac37c11106ef
--- /dev/null
+++ b/nixos/modules/services/misc/home-assistant.nix
@@ -0,0 +1,144 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.home-assistant;
+
+  configFile = pkgs.writeText "configuration.yaml" (builtins.toJSON cfg.config);
+
+  availableComponents = pkgs.home-assistant.availableComponents;
+
+  # Given component "parentConfig.platform", returns whether config.parentConfig
+  # is a list containing a set with set.platform == "platform".
+  #
+  # For example, the component sensor.luftdaten is used as follows:
+  # config.sensor = [ {
+  #   platform = "luftdaten";
+  #   ...
+  # } ];
+  useComponentPlatform = component:
+    let
+      path = splitString "." component;
+      parentConfig = attrByPath (init path) null cfg.config;
+      platform = last path;
+    in isList parentConfig && any
+      (item: item.platform or null == platform)
+      parentConfig;
+
+  # Returns whether component is used in config
+  useComponent = component:
+    hasAttrByPath (splitString "." component) cfg.config
+    || useComponentPlatform component;
+
+  # List of components used in config
+  extraComponents = filter useComponent availableComponents;
+
+  package = if cfg.autoExtraComponents
+    then (cfg.package.override { inherit extraComponents; })
+    else cfg.package;
+
+in {
+  meta.maintainers = with maintainers; [ dotlambda ];
+
+  options.services.home-assistant = {
+    enable = mkEnableOption "Home Assistant";
+
+    configDir = mkOption {
+      default = "/var/lib/hass";
+      type = types.path;
+      description = "The config directory, where your <filename>configuration.yaml</filename> is located.";
+    };
+
+    config = mkOption {
+      default = null;
+      type = with types; nullOr attrs;
+      example = literalExample ''
+        {
+          homeassistant = {
+            name = "Home";
+            time_zone = "UTC";
+          };
+          frontend = { };
+          http = { };
+          feedreader.urls = [ "https://nixos.org/blogs.xml" ];
+        }
+      '';
+      description = ''
+        Your <filename>configuration.yaml</filename> as a Nix attribute set.
+        Beware that setting this option will delete your previous <filename>configuration.yaml</filename>.
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.home-assistant;
+      defaultText = "pkgs.home-assistant";
+      type = types.package;
+      example = literalExample ''
+        pkgs.home-assistant.override {
+          extraPackages = ps: with ps; [ colorlog ];
+        }
+      '';
+      description = ''
+        Home Assistant package to use.
+        Override <literal>extraPackages</literal> in order to add additional dependencies.
+      '';
+    };
+
+    autoExtraComponents = mkOption {
+      default = true;
+      type = types.bool;
+      description = ''
+        If set to <literal>true</literal>, the components used in <literal>config</literal>
+        are set as the specified package's <literal>extraComponents</literal>.
+        This in turn adds all packaged dependencies to the derivation.
+        You might still see import errors in your log.
+        In this case, you will need to package the necessary dependencies yourself
+        or ask someone else to package them.
+        If a dependency is packaged but not automatically added to this list,
+        you might need to specify it in <literal>extraPackages</literal>.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.home-assistant = {
+      description = "Home Assistant";
+      after = [ "network.target" ];
+      preStart = lib.optionalString (cfg.config != null) ''
+        rm -f ${cfg.configDir}/configuration.yaml
+        ln -s ${configFile} ${cfg.configDir}/configuration.yaml
+      '';
+      serviceConfig = {
+        ExecStart = ''
+          ${package}/bin/hass --config "${cfg.configDir}"
+        '';
+        User = "hass";
+        Group = "hass";
+        Restart = "on-failure";
+        ProtectSystem = "strict";
+        ReadWritePaths = "${cfg.configDir}";
+        PrivateTmp = true;
+      };
+      path = [
+        "/run/wrappers" # needed for ping
+      ];
+    };
+
+    systemd.targets.home-assistant = rec {
+      description = "Home Assistant";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "home-assistant.service" ];
+      after = wants;
+    };
+
+    users.extraUsers.hass = {
+      home = cfg.configDir;
+      createHome = true;
+      group = "hass";
+      uid = config.ids.uids.hass;
+    };
+
+    users.extraGroups.hass.gid = config.ids.gids.hass;
+  };
+}
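The new home-assistant module writes cfg.config to configuration.yaml and, with autoExtraComponents left at its default, derives the component list from the attribute names used there. A sketch mirroring the option example above:

  { ... }:
  {
    services.home-assistant = {
      enable = true;
      config = {
        homeassistant = {
          name = "Home";
          time_zone = "UTC";
        };
        frontend = { };
        http = { };
      };
    };
  }
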
diff --git a/nixos/modules/services/misc/ihaskell.nix b/nixos/modules/services/misc/ihaskell.nix
index e07a4a44613a..6da9cc8c47e6 100644
--- a/nixos/modules/services/misc/ihaskell.nix
+++ b/nixos/modules/services/misc/ihaskell.nix
@@ -55,7 +55,7 @@ in
       serviceConfig = {
         User = config.users.extraUsers.ihaskell.name;
         Group = config.users.extraGroups.ihaskell.name;
-        ExecStart = "${pkgs.stdenv.shell} -c \"cd $HOME;${ihaskell}/bin/ihaskell-notebook\"";
+        ExecStart = "${pkgs.runtimeShell} -c \"cd $HOME;${ihaskell}/bin/ihaskell-notebook\"";
       };
     };
   };
diff --git a/nixos/modules/services/misc/logkeys.nix b/nixos/modules/services/misc/logkeys.nix
index 6051c884465d..df0b3ae24c90 100644
--- a/nixos/modules/services/misc/logkeys.nix
+++ b/nixos/modules/services/misc/logkeys.nix
@@ -1,4 +1,4 @@
-{ config, lib, ... }:
+{ config, lib, pkgs, ... }:
 
 with lib;
 
diff --git a/nixos/modules/services/misc/matrix-synapse.nix b/nixos/modules/services/misc/matrix-synapse.nix
index a3ec0ea59f8f..7e880ad09b89 100644
--- a/nixos/modules/services/misc/matrix-synapse.nix
+++ b/nixos/modules/services/misc/matrix-synapse.nix
@@ -4,6 +4,8 @@ with lib;
 
 let
   cfg = config.services.matrix-synapse;
+  pg = config.services.postgresql;
+  usePostgresql = cfg.database_type == "psycopg2";
   logConfigFile = pkgs.writeText "log_config.yaml" cfg.logConfig;
   mkResource = r: ''{names: ${builtins.toJSON r.names}, compress: ${boolToString r.compress}}'';
   mkListener = l: ''{port: ${toString l.port}, bind_address: "${l.bind_address}", type: ${l.type}, tls: ${boolToString l.tls}, x_forwarded: ${boolToString l.x_forwarded}, resources: [${concatStringsSep "," (map mkResource l.resources)}]}'';
@@ -38,13 +40,12 @@ database: {
   name: "${cfg.database_type}",
   args: {
     ${concatStringsSep ",\n    " (
-      mapAttrsToList (n: v: "\"${n}\": ${v}") cfg.database_args
+      mapAttrsToList (n: v: "\"${n}\": ${builtins.toJSON v}") cfg.database_args
     )}
   }
 }
 event_cache_size: "${cfg.event_cache_size}"
 verbose: ${cfg.verbose}
-log_file: "/var/log/matrix-synapse/homeserver.log"
 log_config: "${logConfigFile}"
 rc_messages_per_second: ${cfg.rc_messages_per_second}
 rc_message_burst_count: ${cfg.rc_message_burst_count}
@@ -53,8 +54,8 @@ federation_rc_sleep_limit: ${cfg.federation_rc_sleep_limit}
 federation_rc_sleep_delay: ${cfg.federation_rc_sleep_delay}
 federation_rc_reject_limit: ${cfg.federation_rc_reject_limit}
 federation_rc_concurrent: ${cfg.federation_rc_concurrent}
-media_store_path: "/var/lib/matrix-synapse/media"
-uploads_path: "/var/lib/matrix-synapse/uploads"
+media_store_path: "${cfg.dataDir}/media"
+uploads_path: "${cfg.dataDir}/uploads"
 max_upload_size: "${cfg.max_upload_size}"
 max_image_pixels: "${cfg.max_image_pixels}"
 dynamic_thumbnails: ${boolToString cfg.dynamic_thumbnails}
@@ -86,7 +87,7 @@ ${optionalString (cfg.macaroon_secret_key != null) ''
 expire_access_token: ${boolToString cfg.expire_access_token}
 enable_metrics: ${boolToString cfg.enable_metrics}
 report_stats: ${boolToString cfg.report_stats}
-signing_key_path: "/var/lib/matrix-synapse/homeserver.signing.key"
+signing_key_path: "${cfg.dataDir}/homeserver.signing.key"
 key_refresh_interval: "${cfg.key_refresh_interval}"
 perspectives:
   servers: {
@@ -156,7 +157,7 @@ in {
       tls_certificate_path = mkOption {
         type = types.nullOr types.str;
         default = null;
-        example = "/var/lib/matrix-synapse/homeserver.tls.crt";
+        example = "${cfg.dataDir}/homeserver.tls.crt";
         description = ''
           PEM encoded X509 certificate for TLS.
           You can replace the self-signed certificate that synapse
@@ -168,7 +169,7 @@ in {
       tls_private_key_path = mkOption {
         type = types.nullOr types.str;
         default = null;
-        example = "/var/lib/matrix-synapse/homeserver.tls.key";
+        example = "${cfg.dataDir}/homeserver.tls.key";
         description = ''
           PEM encoded private key for TLS. Specify null if synapse is not
           speaking TLS directly.
@@ -177,7 +178,7 @@ in {
       tls_dh_params_path = mkOption {
         type = types.nullOr types.str;
         default = null;
-        example = "/var/lib/matrix-synapse/homeserver.tls.dh";
+        example = "${cfg.dataDir}/homeserver.tls.dh";
         description = ''
           PEM dh parameters for ephemeral keys
         '';
@@ -185,6 +186,7 @@ in {
       server_name = mkOption {
         type = types.str;
         example = "example.com";
+        default = config.networking.hostName;
         description = ''
           The domain name of the server, with optional explicit port.
           This is used by remote servers to connect to this server,
@@ -340,16 +342,39 @@ in {
       };
       database_type = mkOption {
         type = types.enum [ "sqlite3" "psycopg2" ];
-        default = "sqlite3";
+        default = if versionAtLeast config.system.stateVersion "18.03"
+          then "psycopg2"
+          else "sqlite3";
         description = ''
           The database engine name. Can be sqlite or psycopg2.
         '';
       };
+      create_local_database = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Whether to create a local database automatically.
+        '';
+      };
+      database_name = mkOption {
+        type = types.str;
+        default = "matrix-synapse";
+        description = "Database name.";
+      };
+      database_user = mkOption {
+        type = types.str;
+        default = "matrix-synapse";
+        description = "Database user name.";
+      };
       database_args = mkOption {
         type = types.attrs;
         default = {
-          database = "/var/lib/matrix-synapse/homeserver.db";
-        };
+          sqlite3 = { database = "${cfg.dataDir}/homeserver.db"; };
+          psycopg2 = {
+            user = cfg.database_user;
+            database = cfg.database_name;
+          };
+        }."${cfg.database_type}";
         description = ''
           Arguments to pass to the engine.
         '';
@@ -579,6 +604,18 @@ in {
           Extra config options for matrix-synapse.
         '';
       };
+      extraConfigFiles = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = ''
+          Extra config files to include.
+
+          The configuration files will be included based on the command line
+          argument --config-path. This allows to configure secrets without
+          having to go through the Nix store, e.g. based on deployment keys if
+          NixOPS is in use.
+        '';
+      };
       logConfig = mkOption {
         type = types.lines;
         default = readFile ./matrix-synapse-log_config.yaml;
@@ -586,6 +623,14 @@ in {
           A yaml python logging config file
         '';
       };
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/matrix-synapse";
+        description = ''
+          The directory where matrix-synapse stores its stateful data such as
+          certificates, media and uploads.
+        '';
+      };
     };
   };
 
@@ -593,7 +638,7 @@ in {
     users.extraUsers = [
       { name = "matrix-synapse";
         group = "matrix-synapse";
-        home = "/var/lib/matrix-synapse";
+        home = cfg.dataDir;
         createHome = true;
         shell = "${pkgs.bash}/bin/bash";
         uid = config.ids.uids.matrix-synapse;
@@ -604,23 +649,48 @@ in {
         gid = config.ids.gids.matrix-synapse;
       } ];
 
+    services.postgresql.enable = mkIf usePostgresql (mkDefault true);
+
     systemd.services.matrix-synapse = {
       description = "Synapse Matrix homeserver";
-      after = [ "network.target" ];
+      after = [ "network.target" "postgresql.service" ];
       wantedBy = [ "multi-user.target" ];
       preStart = ''
         ${cfg.package}/bin/homeserver \
           --config-path ${configFile} \
-          --keys-directory /var/lib/matrix-synapse \
+          --keys-directory ${cfg.dataDir} \
           --generate-keys
+      '' + optionalString (usePostgresql && cfg.create_local_database) ''
+        if ! test -e "${cfg.dataDir}/db-created"; then
+          ${pkgs.sudo}/bin/sudo -u ${pg.superUser} \
+            ${pg.package}/bin/createuser \
+            --login \
+            --no-createdb \
+            --no-createrole \
+            --encrypted \
+            ${cfg.database_user}
+          ${pkgs.sudo}/bin/sudo -u ${pg.superUser} \
+            ${pg.package}/bin/createdb \
+            --owner=${cfg.database_user} \
+            --encoding=UTF8 \
+            --lc-collate=C \
+            --lc-ctype=C \
+            --template=template0 \
+            ${cfg.database_name}
+          touch "${cfg.dataDir}/db-created"
+        fi
       '';
       serviceConfig = {
         Type = "simple";
         User = "matrix-synapse";
         Group = "matrix-synapse";
-        WorkingDirectory = "/var/lib/matrix-synapse";
+        WorkingDirectory = cfg.dataDir;
         PermissionsStartOnly = true;
-        ExecStart = "${cfg.package}/bin/homeserver --config-path ${configFile} --keys-directory /var/lib/matrix-synapse";
+        ExecStart = ''
+          ${cfg.package}/bin/homeserver \
+            ${ concatMapStringsSep "\n  " (x: "--config-path ${x} \\") ([ configFile ] ++ cfg.extraConfigFiles) }
+            --keys-directory ${cfg.dataDir}
+        '';
         Restart = "on-failure";
       };
     };
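For matrix-synapse, psycopg2 becomes the default database on stateVersion >= 18.03 and the module can now provision the database itself, while extraConfigFiles keeps secrets out of the Nix store. A sketch, with the secrets path a placeholder:

  { ... }:
  {
    services.matrix-synapse = {
      enable = true;
      server_name = "example.com";
      database_type = "psycopg2";   # role and database are created in preStart
      # Secrets file managed outside the Nix store; the path is a placeholder.
      extraConfigFiles = [ "/var/lib/matrix-synapse/secrets.yaml" ];
    };
  }
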
diff --git a/nixos/modules/services/misc/mbpfan.nix b/nixos/modules/services/misc/mbpfan.nix
index 972d8b572d36..50f6f80ad00c 100644
--- a/nixos/modules/services/misc/mbpfan.nix
+++ b/nixos/modules/services/misc/mbpfan.nix
@@ -8,13 +8,7 @@ let
 
 in {
   options.services.mbpfan = {
-    enable = mkOption {
-      default = false;
-      type = types.bool;
-      description = ''
-        Whether to enable the mbpfan daemon.
-      '';
-    };
+    enable = mkEnableOption "mbpfan, fan controller daemon for Apple Macs and MacBooks";
 
     package = mkOption {
       type = types.package;
diff --git a/nixos/modules/services/misc/mesos-slave.nix b/nixos/modules/services/misc/mesos-slave.nix
index 47be10274d3b..effa29b64f63 100644
--- a/nixos/modules/services/misc/mesos-slave.nix
+++ b/nixos/modules/services/misc/mesos-slave.nix
@@ -188,7 +188,7 @@ in {
       description = "Mesos Slave";
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
-      path = [ pkgs.stdenv.shellPackage ];
+      path = [ pkgs.runtimeShellPackage ];
       serviceConfig = {
         ExecStart = ''
           ${pkgs.mesos}/bin/mesos-slave \
@@ -213,7 +213,7 @@ in {
         PermissionsStartOnly = true;
       };
       preStart = ''
-        mkdir -m 0700 -p ${cfg.workDir}
+        mkdir -m 0701 -p ${cfg.workDir}
       '';
     };
   };
diff --git a/nixos/modules/services/misc/nix-daemon.nix b/nixos/modules/services/misc/nix-daemon.nix
index beca820d2d60..f2d34560a718 100644
--- a/nixos/modules/services/misc/nix-daemon.nix
+++ b/nixos/modules/services/misc/nix-daemon.nix
@@ -8,7 +8,7 @@ let
 
   nix = cfg.package.out;
 
-  isNix112 = versionAtLeast (getVersion nix) "1.12pre";
+  isNix20 = versionAtLeast (getVersion nix) "2.0pre";
 
   makeNixBuildUser = nr:
     { name = "nixbld${toString nr}";
@@ -26,32 +26,40 @@ let
 
   nixConf =
     let
-      # If we're using sandbox for builds, then provide /bin/sh in
-      # the sandbox as a bind-mount to bash. This means we also need to
-      # include the entire closure of bash.
-      sh = pkgs.stdenv.shell;
+      # In Nix < 2.0, If we're using sandbox for builds, then provide
+      # /bin/sh in the sandbox as a bind-mount to bash. This means we
+      # also need to include the entire closure of bash. Nix >= 2.0
+      # provides a /bin/sh by default.
+      sh = pkgs.runtimeShell;
       binshDeps = pkgs.writeReferencesToFile sh;
     in
-      pkgs.runCommand "nix.conf" {extraOptions = cfg.extraOptions; } ''
-        extraPaths=$(for i in $(cat ${binshDeps}); do if test -d $i; then echo $i; fi; done)
+      pkgs.runCommand "nix.conf" { extraOptions = cfg.extraOptions; } ''
+        ${optionalString (!isNix20) ''
+          extraPaths=$(for i in $(cat ${binshDeps}); do if test -d $i; then echo $i; fi; done)
+        ''}
         cat > $out <<END
         # WARNING: this file is generated from the nix.* options in
         # your NixOS configuration, typically
         # /etc/nixos/configuration.nix.  Do not edit it!
         build-users-group = nixbld
-        build-max-jobs = ${toString (cfg.maxJobs)}
-        build-cores = ${toString (cfg.buildCores)}
-        build-use-sandbox = ${if (builtins.isBool cfg.useSandbox) then boolToString cfg.useSandbox else cfg.useSandbox}
-        build-sandbox-paths = ${toString cfg.sandboxPaths} /bin/sh=${sh} $(echo $extraPaths)
-        binary-caches = ${toString cfg.binaryCaches}
-        trusted-binary-caches = ${toString cfg.trustedBinaryCaches}
-        binary-cache-public-keys = ${toString cfg.binaryCachePublicKeys}
+        ${if isNix20 then "max-jobs" else "build-max-jobs"} = ${toString (cfg.maxJobs)}
+        ${if isNix20 then "cores" else "build-cores"} = ${toString (cfg.buildCores)}
+        ${if isNix20 then "sandbox" else "build-use-sandbox"} = ${if (builtins.isBool cfg.useSandbox) then boolToString cfg.useSandbox else cfg.useSandbox}
+        ${if isNix20 then "extra-sandbox-paths" else "build-sandbox-paths"} = ${toString cfg.sandboxPaths} ${optionalString (!isNix20) "/bin/sh=${sh} $(echo $extraPaths)"}
+        ${if isNix20 then "substituters" else "binary-caches"} = ${toString cfg.binaryCaches}
+        ${if isNix20 then "trusted-substituters" else "trusted-binary-caches"} = ${toString cfg.trustedBinaryCaches}
+        ${if isNix20 then "trusted-public-keys" else "binary-cache-public-keys"} = ${toString cfg.binaryCachePublicKeys}
         auto-optimise-store = ${boolToString cfg.autoOptimiseStore}
-        ${optionalString cfg.requireSignedBinaryCaches ''
-          signed-binary-caches = *
+        ${if isNix20 then ''
+          require-sigs = ${if cfg.requireSignedBinaryCaches then "true" else "false"}
+        '' else ''
+          signed-binary-caches = ${if cfg.requireSignedBinaryCaches then "*" else ""}
         ''}
         trusted-users = ${toString cfg.trustedUsers}
         allowed-users = ${toString cfg.allowedUsers}
+        ${optionalString (isNix20 && !cfg.distributedBuilds) ''
+          builders =
+        ''}
         $extraOptions
         END
       '';
@@ -377,8 +385,9 @@ in
     systemd.sockets.nix-daemon.wantedBy = [ "sockets.target" ];
 
     systemd.services.nix-daemon =
-      { path = [ nix pkgs.openssl.bin pkgs.utillinux config.programs.ssh.package ]
-          ++ optionals cfg.distributedBuilds [ pkgs.gzip ];
+      { path = [ nix pkgs.utillinux ]
+          ++ optionals cfg.distributedBuilds [ config.programs.ssh.package pkgs.gzip ]
+          ++ optionals (!isNix20) [ pkgs.openssl.bin ];
 
         environment = cfg.envVars
           // { CURL_CA_BUNDLE = "/etc/ssl/certs/ca-certificates.crt"; }
@@ -396,10 +405,9 @@ in
       };
 
     nix.envVars =
-      { NIX_CONF_DIR = "/etc/nix";
-      }
+      optionalAttrs (!isNix20) {
+        NIX_CONF_DIR = "/etc/nix";
 
-      // optionalAttrs (!isNix112) {
         # Enable the copy-from-other-stores substituter, which allows
         # builds to be sped up by copying build results from remote
         # Nix stores.  To do this, mount the remote file system on a
@@ -407,12 +415,8 @@ in
         NIX_OTHER_STORES = "/run/nix/remote-stores/*/nix";
       }
 
-      // optionalAttrs cfg.distributedBuilds {
-        NIX_BUILD_HOOK =
-          if isNix112 then
-            "${nix}/libexec/nix/build-remote"
-          else
-            "${nix}/libexec/nix/build-remote.pl";
+      // optionalAttrs (cfg.distributedBuilds && !isNix20) {
+        NIX_BUILD_HOOK = "${nix}/libexec/nix/build-remote.pl";
       };
 
     # Set up the environment variables for running Nix.
@@ -420,7 +424,7 @@ in
       { NIX_PATH = concatStringsSep ":" cfg.nixPath;
       };
 
-    environment.extraInit =
+    environment.extraInit = optionalString (!isNix20)
       ''
         # Set up secure multi-user builds: non-root users build through the
         # Nix daemon.
@@ -435,19 +439,18 @@ in
 
     services.xserver.displayManager.hiddenUsers = map ({ name, ... }: name) nixbldUsers;
 
+    # FIXME: use systemd-tmpfiles to create Nix directories.
     system.activationScripts.nix = stringAfter [ "etc" "users" ]
       ''
         # Nix initialisation.
-        mkdir -m 0755 -p \
+        install -m 0755 -d \
           /nix/var/nix/gcroots \
           /nix/var/nix/temproots \
-          /nix/var/nix/manifests \
           /nix/var/nix/userpool \
           /nix/var/nix/profiles \
           /nix/var/nix/db \
-          /nix/var/log/nix/drvs \
-          /nix/var/nix/channel-cache
-        mkdir -m 1777 -p \
+          /nix/var/log/nix/drvs
+        install -m 1777 -d \
           /nix/var/nix/gcroots/per-user \
           /nix/var/nix/profiles/per-user \
           /nix/var/nix/gcroots/tmp
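The nix-daemon rewrite only changes the keys emitted into the generated nix.conf for Nix >= 2.0; the NixOS options themselves stay the same. Roughly, with illustrative values:

  { ... }:
  {
    nix.maxJobs = 4;        # emitted as max-jobs (2.0) rather than build-max-jobs
    nix.useSandbox = true;  # sandbox rather than build-use-sandbox
    nix.binaryCaches = [ "https://cache.nixos.org/" ];  # substituters rather than binary-caches
  }
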
diff --git a/nixos/modules/services/misc/nix-ssh-serve.nix b/nixos/modules/services/misc/nix-ssh-serve.nix
index 66148431709f..5bd9cf9086f1 100644
--- a/nixos/modules/services/misc/nix-ssh-serve.nix
+++ b/nixos/modules/services/misc/nix-ssh-serve.nix
@@ -1,8 +1,12 @@
 { config, lib, pkgs, ... }:
 
 with lib;
-
-{
+let cfg = config.nix.sshServe;
+    command =
+      if cfg.protocol == "ssh"
+        then "nix-store --serve"
+      else "nix-daemon --stdio";
+in {
   options = {
 
     nix.sshServe = {
@@ -10,7 +14,7 @@ with lib;
       enable = mkOption {
         type = types.bool;
         default = false;
-        description = "Whether to enable serving the Nix store as a binary cache via SSH.";
+        description = "Whether to enable serving the Nix store as a remote store via SSH.";
       };
 
       keys = mkOption {
@@ -20,14 +24,20 @@ with lib;
         description = "A list of SSH public keys allowed to access the binary cache via SSH.";
       };
 
+      protocol = mkOption {
+        type = types.enum [ "ssh" "ssh-ng" ];
+        default = "ssh";
+        description = "The specific Nix-over-SSH protocol to use.";
+      };
+
     };
 
   };
 
-  config = mkIf config.nix.sshServe.enable {
+  config = mkIf cfg.enable {
 
     users.extraUsers.nix-ssh = {
-      description = "Nix SSH substituter user";
+      description = "Nix SSH store user";
       uid = config.ids.uids.nix-ssh;
       useDefaultShell = true;
     };
@@ -41,11 +51,11 @@ with lib;
         PermitTTY no
         PermitTunnel no
         X11Forwarding no
-        ForceCommand ${config.nix.package.out}/bin/nix-store --serve
+        ForceCommand ${config.nix.package.out}/bin/${command}
       Match All
     '';
 
-    users.extraUsers.nix-ssh.openssh.authorizedKeys.keys = config.nix.sshServe.keys;
+    users.extraUsers.nix-ssh.openssh.authorizedKeys.keys = cfg.keys;
 
   };
 }
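With the new protocol option, nix.sshServe can force either nix-store --serve (read-only "ssh") or nix-daemon --stdio ("ssh-ng"). A sketch, with a placeholder public key:

  { ... }:
  {
    nix.sshServe = {
      enable = true;
      protocol = "ssh-ng";   # forces nix-daemon --stdio instead of nix-store --serve
      keys = [ "ssh-ed25519 AAAA... builder@example" ];   # placeholder key
    };
  }
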
diff --git a/nixos/modules/services/misc/nixos-manual.nix b/nixos/modules/services/misc/nixos-manual.nix
index 41cadb4a6de0..4bd1c20edf71 100644
--- a/nixos/modules/services/misc/nixos-manual.nix
+++ b/nixos/modules/services/misc/nixos-manual.nix
@@ -16,14 +16,14 @@ let
     It isn't perfect, but it seems to cover a vast majority of use cases.
     Caveat: even if the package is reached by a different means,
     the path above will be shown and not e.g. `${config.services.foo.package}`. */
-  manual = import ../../../doc/manual {
+  manual = import ../../../doc/manual rec {
     inherit pkgs config;
-    version = config.system.nixosRelease;
-    revision = "release-${config.system.nixosRelease}";
+    version = config.system.nixos.release;
+    revision = "release-${version}";
     options =
       let
         scrubbedEval = evalModules {
-          modules = [ { nixpkgs.system = config.nixpkgs.system; } ] ++ baseModules;
+          modules = [ { nixpkgs.localSystem = config.nixpkgs.localSystem; } ] ++ baseModules;
           args = (config._module.args) // { modules = [ ]; };
           specialArgs = { pkgs = scrubDerivations "pkgs" pkgs; };
         };
@@ -43,7 +43,7 @@ let
 
   helpScript = pkgs.writeScriptBin "nixos-help"
     ''
-      #! ${pkgs.stdenv.shell} -e
+      #! ${pkgs.runtimeShell} -e
       browser="$BROWSER"
       if [ -z "$browser" ]; then
         browser="$(type -P xdg-open || true)"
@@ -112,10 +112,10 @@ in
 
     system.build.manual = manual;
 
-    environment.systemPackages =
-      [ manual.manual helpScript ]
-      ++ optionals config.services.xserver.enable [desktopItem pkgs.nixos-icons]
-      ++ optional config.programs.man.enable manual.manpages;
+    environment.systemPackages = []
+      ++ optionals config.services.xserver.enable [ desktopItem pkgs.nixos-icons ]
+      ++ optional  config.documentation.man.enable manual.manpages
+      ++ optionals config.documentation.doc.enable [ manual.manual helpScript ];
 
     boot.extraTTYs = mkIf cfg.showManual ["tty${toString cfg.ttyNumber}"];
 
diff --git a/nixos/modules/services/misc/novacomd.nix b/nixos/modules/services/misc/novacomd.nix
new file mode 100644
index 000000000000..7cfc68d2b673
--- /dev/null
+++ b/nixos/modules/services/misc/novacomd.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.novacomd;
+
+in {
+
+  options = {
+    services.novacomd = {
+      enable = mkEnableOption "Novacom service for connecting to WebOS devices";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.webos.novacom ];
+
+    systemd.services.novacomd = {
+      description = "Novacom WebOS daemon";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.webos.novacomd}/sbin/novacomd";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ dtzWill ];
+}
diff --git a/nixos/modules/services/misc/osrm.nix b/nixos/modules/services/misc/osrm.nix
new file mode 100644
index 000000000000..7ec8b15906fc
--- /dev/null
+++ b/nixos/modules/services/misc/osrm.nix
@@ -0,0 +1,85 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.osrm;
+in
+
+{
+  options.services.osrm = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Enable the OSRM service.";
+    };
+
+    address = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = "IP address on which the web server will listen.";
+    };
+
+    port = mkOption {
+      type = types.int;
+      default = 5000;
+      description = "Port on which the web server will run.";
+    };
+
+    threads = mkOption {
+      type = types.int;
+      default = 4;
+      description = "Number of threads to use.";
+    };
+
+    algorithm = mkOption {
+      type = types.enum [ "CH" "CoreCH" "MLD" ];
+      default = "MLD";
+      description = "Algorithm to use for the data. Must be one of CH, CoreCH, MLD";
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "--max-table-size 1000" "--max-matching-size 1000" ];
+      description = "Extra command line arguments passed to osrm-routed";
+    };
+
+    dataFile = mkOption {
+      type = types.path;
+      example = "/var/lib/osrm/berlin-latest.osrm";
+      description = "Data file location";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users.osrm = {
+      group = config.users.users.osrm.name;
+      description = "OSRM user";
+      createHome = false;
+    };
+
+    users.groups.osrm = { };
+
+    systemd.services.osrm = {
+      description = "OSRM service";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User = config.users.extraUsers.osrm.name;
+        ExecStart = ''
+          ${pkgs.osrm-backend}/bin/osrm-routed \
+            --ip ${cfg.address} \
+            --port ${toString cfg.port} \
+            --threads ${toString cfg.threads} \
+            --algorithm ${cfg.algorithm} \
+            ${toString cfg.extraFlags} \
+            ${cfg.dataFile}
+        '';
+      };
+    };
+  };
+}
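The osrm module runs osrm-routed against an already pre-processed dataset; producing the .osrm file (osrm-extract and related tools) is outside its scope. A sketch using the option's own example path:

  { ... }:
  {
    services.osrm = {
      enable = true;
      dataFile = "/var/lib/osrm/berlin-latest.osrm";
      algorithm = "MLD";
      port = 5000;
    };
  }
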
diff --git a/nixos/modules/services/misc/parsoid.nix b/nixos/modules/services/misc/parsoid.nix
index ae3f84333d2d..c757093e5c1b 100644
--- a/nixos/modules/services/misc/parsoid.nix
+++ b/nixos/modules/services/misc/parsoid.nix
@@ -6,6 +6,8 @@ let
 
   cfg = config.services.parsoid;
 
+  parsoid = pkgs.nodePackages."parsoid-git://github.com/abbradar/parsoid#stable";
+
   confTree = {
     worker_heartbeat_timeout = 300000;
     logging = { level = "info"; };
@@ -93,7 +95,7 @@ in
       after = [ "network.target" ];
       serviceConfig = {
         User = "nobody";
-        ExecStart = "${pkgs.nodePackages.parsoid}/lib/node_modules/parsoid/bin/server.js -c ${confFile} -n ${toString cfg.workers}";
+        ExecStart = "${parsoid}/lib/node_modules/parsoid/bin/server.js -c ${confFile} -n ${toString cfg.workers}";
       };
     };
 
diff --git a/nixos/modules/services/misc/safeeyes.nix b/nixos/modules/services/misc/safeeyes.nix
new file mode 100644
index 000000000000..1a33971d9227
--- /dev/null
+++ b/nixos/modules/services/misc/safeeyes.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.safeeyes;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.safeeyes = {
+
+      enable = mkOption {
+        default = false;
+        description = "Whether to enable the safeeyes OSGi service";
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.user.services.safeeyes = {
+      description = "Safeeyes";
+
+      wantedBy = [ "graphical-session.target" ];
+      partOf   = [ "graphical-session.target" ];
+
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.safeeyes}/bin/safeeyes
+        '';
+        Restart = "on-failure";
+        RestartSec = 3;
+        StartLimitInterval = 350;
+        StartLimitBurst = 10;
+      };
+    };
+
+  };
+}
diff --git a/nixos/modules/services/misc/serviio.nix b/nixos/modules/services/misc/serviio.nix
new file mode 100644
index 000000000000..a6612e9c6adb
--- /dev/null
+++ b/nixos/modules/services/misc/serviio.nix
@@ -0,0 +1,92 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.serviio;
+
+  serviioStart = pkgs.writeScript "serviio.sh" ''
+    #!${pkgs.bash}/bin/sh
+
+    SERVIIO_HOME=${pkgs.serviio}
+    
+    # Setup the classpath
+    SERVIIO_CLASS_PATH="$SERVIIO_HOME/lib/*:$SERVIIO_HOME/config"
+
+    # Setup Serviio specific properties
+    JAVA_OPTS="-Djava.net.preferIPv4Stack=true -Djava.awt.headless=true -Dorg.restlet.engine.loggerFacadeClass=org.restlet.ext.slf4j.Slf4jLoggerFacade
+               -Dderby.system.home=${cfg.dataDir}/library -Dserviio.home=${cfg.dataDir} -Dffmpeg.location=${pkgs.ffmpeg}/bin/ffmpeg -Ddcraw.location=${pkgs.dcraw}/bin/dcraw"
+
+    # Execute the JVM in the foreground
+    exec ${pkgs.jre}/bin/java -Xmx512M -Xms20M -XX:+UseG1GC -XX:GCTimeRatio=1 -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=20 $JAVA_OPTS -classpath "$SERVIIO_CLASS_PATH" org.serviio.MediaServer "$@"
+  '';
+  
+in {
+
+  ###### interface
+  options = {
+    services.serviio = {
+      
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable the Serviio Media Server.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/serviio";
+        description = ''
+          The directory where serviio stores its state, data, etc.
+        '';
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.serviio = {
+      description = "Serviio Media Server";
+      after = [ "local-fs.target" "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.serviio ];
+      serviceConfig = {
+        User = "serviio";
+        Group = "serviio";
+        ExecStart = "${serviioStart}";
+        ExecStop = "${serviioStart} -stop";
+      };
+    };
+
+    users.extraUsers = [
+      { 
+        name = "serviio";
+        group = "serviio";
+        home = cfg.dataDir;
+        description = "Serviio Media Server User";
+        createHome = true;
+        isSystemUser = true;
+      }
+    ];
+
+    users.extraGroups = [
+      { name = "serviio";} 
+    ];
+
+    networking.firewall = {
+      allowedTCPPorts = [ 
+        8895  # serve UPnP responses
+        23423 # console
+        23424 # mediabrowser
+      ];
+      allowedUDPPorts = [ 
+        1900 # UPnP service discovery
+      ];
+    };
+  };
+}
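The serviio module wraps the JVM start script defined above and opens the DLNA-related ports itself, so a typical configuration reduces to:

  { ... }:
  {
    services.serviio = {
      enable = true;
      dataDir = "/var/lib/serviio";   # the default, shown for clarity
    };
  }
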
diff --git a/nixos/modules/services/misc/ssm-agent.nix b/nixos/modules/services/misc/ssm-agent.nix
index c1e1f0903539..e951a4c7ffa8 100644
--- a/nixos/modules/services/misc/ssm-agent.nix
+++ b/nixos/modules/services/misc/ssm-agent.nix
@@ -8,11 +8,11 @@ let
   # in nixpkgs doesn't seem to work properly on NixOS, so let's just fake the two fields SSM
   # looks for. See https://github.com/aws/amazon-ssm-agent/issues/38 for upstream fix.
   fake-lsb-release = pkgs.writeScriptBin "lsb_release" ''
-    #!${pkgs.stdenv.shell}
+    #!${pkgs.runtimeShell}
 
     case "$1" in
       -i) echo "nixos";;
-      -r) echo "${config.system.nixosVersion}";;
+      -r) echo "${config.system.nixos.version}";;
     esac
   '';
 in {
diff --git a/nixos/modules/services/misc/xmr-stak.nix b/nixos/modules/services/misc/xmr-stak.nix
new file mode 100644
index 000000000000..57f439365471
--- /dev/null
+++ b/nixos/modules/services/misc/xmr-stak.nix
@@ -0,0 +1,73 @@
+{ lib, config, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xmr-stak;
+
+  pkg = pkgs.xmr-stak.override {
+    inherit (cfg) openclSupport cudaSupport;
+  };
+
+  xmrConfArg = optionalString (cfg.configText != "") ("-c " +
+    pkgs.writeText "xmr-stak-config.txt" cfg.configText);
+
+in
+
+{
+  options = {
+    services.xmr-stak = {
+      enable = mkEnableOption "xmr-stak miner";
+      openclSupport = mkEnableOption "support for OpenCL (AMD/ATI graphics cards)";
+      cudaSupport = mkEnableOption "support for CUDA (NVidia graphics cards)";
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--noCPU" "--currency monero" ];
+        description = "List of parameters to pass to xmr-stak.";
+      };
+
+      configText = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          "currency" : "monero",
+          "pool_list" :
+            [ { "pool_address" : "pool.supportxmr.com:5555",
+                "wallet_address" : "<long-hash>",
+                "pool_password" : "minername",
+                "pool_weight" : 1,
+              },
+            ],
+        '';
+        description = ''
+          Verbatim xmr-stak config.txt. If empty, the <literal>-c</literal>
+          parameter will not be added to the xmr-stak command.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.xmr-stak = {
+      wantedBy = [ "multi-user.target" ];
+      bindsTo = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      environment = mkIf cfg.cudaSupport {
+        LD_LIBRARY_PATH = "${pkgs.linuxPackages_latest.nvidia_x11}/lib";
+      };
+      script = ''
+        exec ${pkg}/bin/xmr-stak ${xmrConfArg} ${concatStringsSep " " cfg.extraArgs}
+      '';
+      serviceConfig = let rootRequired = cfg.openclSupport || cfg.cudaSupport; in {
+        # xmr-stak generates cpu and/or gpu configuration files
+        WorkingDirectory = "/tmp";
+        PrivateTmp = true;
+        DynamicUser = !rootRequired;
+        LimitMEMLOCK = toString (1024*1024);
+      };
+    };
+  };
+}
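Putting the new options together, a usage sketch; the pool settings are copied from the configText example above and the wallet hash is a placeholder:

  services.xmr-stak = {
    enable = true;
    cudaSupport = true;            # build with CUDA and run against the nvidia libraries
    extraArgs = [ "--noCPU" ];     # appended verbatim to the xmr-stak command line
    configText = ''
      "currency" : "monero",
      "pool_list" :
        [ { "pool_address" : "pool.supportxmr.com:5555",
            "wallet_address" : "<long-hash>",
            "pool_password" : "minername",
            "pool_weight" : 1,
          },
        ],
    '';
  };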
diff --git a/nixos/modules/services/misc/zookeeper.nix b/nixos/modules/services/misc/zookeeper.nix
index d85b5e4ec507..91539592511c 100644
--- a/nixos/modules/services/misc/zookeeper.nix
+++ b/nixos/modules/services/misc/zookeeper.nix
@@ -106,10 +106,19 @@ in {
       '';
     };
 
+    package = mkOption {
+      description = "The zookeeper package to use";
+      default = pkgs.zookeeper;
+      defaultText = "pkgs.zookeeper";
+      type = types.package;
+    };
+
   };
 
 
   config = mkIf cfg.enable {
+    environment.systemPackages = [cfg.package];
+
     systemd.services.zookeeper = {
       description = "Zookeeper Daemon";
       wantedBy = [ "multi-user.target" ];
@@ -118,7 +127,7 @@ in {
       serviceConfig = {
         ExecStart = ''
           ${pkgs.jre}/bin/java \
-            -cp "${pkgs.zookeeper}/lib/*:${pkgs.zookeeper}/${pkgs.zookeeper.name}.jar:${configDir}" \
+            -cp "${cfg.package}/lib/*:${cfg.package}/${cfg.package.name}.jar:${configDir}" \
             ${escapeShellArgs cfg.extraCmdLineOptions} \
             -Dzookeeper.datadir.autocreate=false \
             ${optionalString cfg.preferIPv4 "-Djava.net.preferIPv4Stack=true"} \
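For context, a sketch of the new package option in use; the value shown simply restates the default, and any ZooKeeper derivation with the same layout should work:

  services.zookeeper = {
    enable = true;
    package = pkgs.zookeeper;   # now also propagated to environment.systemPackages
  };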
diff --git a/nixos/modules/services/monitoring/apcupsd.nix b/nixos/modules/services/monitoring/apcupsd.nix
index 9abd6e9ab641..839116de6265 100644
--- a/nixos/modules/services/monitoring/apcupsd.nix
+++ b/nixos/modules/services/monitoring/apcupsd.nix
@@ -38,7 +38,7 @@ let
   ];
 
   shellCmdsForEventScript = eventname: commands: ''
-    echo "#!${pkgs.stdenv.shell}" > "$out/${eventname}"
+    echo "#!${pkgs.runtimeShell}" > "$out/${eventname}"
     echo '${commands}' >> "$out/${eventname}"
     chmod a+x "$out/${eventname}"
   '';
diff --git a/nixos/modules/services/monitoring/fusion-inventory.nix b/nixos/modules/services/monitoring/fusion-inventory.nix
index 1c00f3c299e9..c3b869e00880 100644
--- a/nixos/modules/services/monitoring/fusion-inventory.nix
+++ b/nixos/modules/services/monitoring/fusion-inventory.nix
@@ -55,9 +55,6 @@ in {
       description = "Fusion Inventory Agent";
       wantedBy = [ "multi-user.target" ];
 
-      environment = {
-        OPTIONS = "--no-category=software";
-      };
       serviceConfig = {
         ExecStart = "${pkgs.fusionInventory}/bin/fusioninventory-agent --conf-file=${configFile} --daemon --no-fork";
       };
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index 4fbacef788f9..eceb91525db4 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -25,6 +25,7 @@ let
     DATABASE_USER = cfg.database.user;
     DATABASE_PASSWORD = cfg.database.password;
     DATABASE_PATH = cfg.database.path;
+    DATABASE_CONN_MAX_LIFETIME = cfg.database.connMaxLifetime;
 
     SECURITY_ADMIN_USER = cfg.security.adminUser;
     SECURITY_ADMIN_PASSWORD = cfg.security.adminPassword;
@@ -49,7 +50,7 @@ in {
     protocol = mkOption {
       description = "Which protocol to listen.";
       default = "http";
-      type = types.enum ["http" "https"];
+      type = types.enum ["http" "https" "socket"];
     };
 
     addr = mkOption {
@@ -111,7 +112,7 @@ in {
       type = mkOption {
         description = "Database type.";
         default = "sqlite3";
-        type = types.enum ["mysql" "sqlite3" "postgresql"];
+        type = types.enum ["mysql" "sqlite3" "postgres"];
       };
 
       host = mkOption {
@@ -143,6 +144,15 @@ in {
         default = "${cfg.dataDir}/data/grafana.db";
         type = types.path;
       };
+
+      connMaxLifetime = mkOption {
+        description = ''
+          Sets the maximum amount of time (in seconds) a connection may be reused.
+          For MySQL this setting should be shorter than the `wait_timeout' variable.
+        '';
+        default = 14400;
+        type = types.int;
+      };
     };
 
     security = {
@@ -241,7 +251,9 @@ in {
       description = "Grafana Service Daemon";
       wantedBy = ["multi-user.target"];
       after = ["networking.target"];
-      environment = mapAttrs' (n: v: nameValuePair "GF_${n}" (toString v)) envOptions;
+      environment = {
+        QT_QPA_PLATFORM = "offscreen";
+      } // mapAttrs' (n: v: nameValuePair "GF_${n}" (toString v)) envOptions;
       serviceConfig = {
         ExecStart = "${cfg.package.bin}/bin/grafana-server -homepath ${cfg.dataDir}";
         WorkingDirectory = cfg.dataDir;
@@ -249,7 +261,7 @@ in {
       };
       preStart = ''
         ln -fs ${cfg.package}/share/grafana/conf ${cfg.dataDir}
-        ln -fs ${cfg.package}/share/grafana/vendor ${cfg.dataDir}
+        ln -fs ${cfg.package}/share/grafana/tools ${cfg.dataDir}
       '';
     };
 
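A sketch combining the Grafana options touched by this change, assuming the module's existing enable switch; the values are illustrative:

  services.grafana = {
    enable = true;
    protocol = "socket";         # newly accepted enum value
    database = {
      type = "postgres";         # note: this enum value replaces "postgresql"
      connMaxLifetime = 14400;   # seconds a connection may be reused (the default)
    };
  };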
diff --git a/nixos/modules/services/monitoring/monit.nix b/nixos/modules/services/monitoring/monit.nix
index 71f50cc0f19d..d48e5c550abb 100644
--- a/nixos/modules/services/monitoring/monit.nix
+++ b/nixos/modules/services/monitoring/monit.nix
@@ -26,16 +26,10 @@ in
 
     environment.systemPackages = [ pkgs.monit ];
 
-    environment.etc = [
-      {
-        source = pkgs.writeTextFile {
-          name = "monitrc";
-          text = config.services.monit.config;
-        };
-        target = "monitrc";
-        mode = "0400";
-      }
-    ];
+    environment.etc."monitrc" = {
+      text = config.services.monit.config;
+      mode = "0400";
+    };
 
     systemd.services.monit = {
       description = "Pro-active monitoring utility for unix systems";
@@ -48,6 +42,8 @@ in
         KillMode = "process";
         Restart = "always";
       };
+      restartTriggers = [ config.environment.etc."monitrc".source ];
     };
+
   };
 }
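With monitrc now generated through environment.etc, a usage sketch assuming the module's usual enable option; the monit directives themselves are a hypothetical example, not taken from the patch:

  services.monit = {
    enable = true;
    config = ''
      set daemon 30
      check filesystem root with path /
        if space usage > 90% then alert
    '';
  };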
diff --git a/nixos/modules/services/monitoring/munin.nix b/nixos/modules/services/monitoring/munin.nix
index cc6d51f0ef1b..358ffd431dd4 100644
--- a/nixos/modules/services/monitoring/munin.nix
+++ b/nixos/modules/services/monitoring/munin.nix
@@ -17,40 +17,6 @@ let
   nodeCfg = config.services.munin-node;
   cronCfg = config.services.munin-cron;
 
-  muninPlugins = pkgs.stdenv.mkDerivation {
-    name = "munin-available-plugins";
-    buildCommand = ''
-      mkdir -p $out
-
-      cp --preserve=mode ${pkgs.munin}/lib/plugins/* $out/
-
-      for file in $out/*; do
-        case "$file" in
-            */plugin.sh|*/plugins.history)
-              chmod +x "$file"
-              continue;;
-        esac
-
-        # read magic makers from the file
-        family=$(sed -nr 's/.*#%#\s+family\s*=\s*(\S+)\s*/\1/p' $file)
-        cap=$(sed -nr 's/.*#%#\s+capabilities\s*=\s*(.+)/\1/p' $file)
-
-        wrapProgram $file \
-          --set PATH "/run/wrappers/bin:/run/current-system/sw/bin" \
-          --set MUNIN_LIBDIR "${pkgs.munin}/lib" \
-          --set MUNIN_PLUGSTATE "/var/run/munin"
-
-        # munin uses markers to tell munin-node-configure what a plugin can do
-        echo "#%# family=$family" >> $file
-        echo "#%# capabilities=$cap" >> $file
-      done
-
-      # NOTE: we disable disktstats because plugin seems to fail and it hangs html generation (100% CPU + memory leak)
-      rm -f $out/diskstats
-    '';
-    buildInputs = [ pkgs.makeWrapper ];
-  };
-
   muninConf = pkgs.writeText "munin.conf"
     ''
       dbdir     /var/lib/munin
@@ -83,6 +49,29 @@ let
 
       ${nodeCfg.extraConfig}
     '';
+
+  pluginConf = pkgs.writeText "munin-plugin-conf"
+    ''
+      [hddtemp_smartctl]
+      user root
+      group root
+
+      [meminfo]
+      user root
+      group root
+
+      [ipmi*]
+      user root
+      group root
+    '';
+
+  pluginConfDir = pkgs.stdenv.mkDerivation {
+    name = "munin-plugin-conf.d";
+    buildCommand = ''
+      mkdir $out
+      ln -s ${pluginConf} $out/nixos-config
+    '';
+  };
 in
 
 {
@@ -179,17 +168,22 @@ in
       description = "Munin Node";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      path = [ pkgs.munin ];
+      path = with pkgs; [ munin smartmontools "/run/current-system/sw" "/run/wrappers" ];
+      environment.MUNIN_LIBDIR = "${pkgs.munin}/lib";
       environment.MUNIN_PLUGSTATE = "/var/run/munin";
+      environment.MUNIN_LOGDIR = "/var/log/munin";
       preStart = ''
         echo "updating munin plugins..."
 
         mkdir -p /etc/munin/plugins
         rm -rf /etc/munin/plugins/*
-        PATH="/run/wrappers/bin:/run/current-system/sw/bin" ${pkgs.munin}/sbin/munin-node-configure --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${muninPlugins} --servicedir=/etc/munin/plugins 2>/dev/null | ${pkgs.bash}/bin/bash
+        ${pkgs.munin}/bin/munin-node-configure --suggest --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${pkgs.munin}/lib/plugins --servicedir=/etc/munin/plugins --sconfdir=${pluginConfDir} 2>/dev/null | ${pkgs.bash}/bin/bash
+
+        # NOTE: we disable diskstats because the plugin seems to fail and hangs HTML generation (100% CPU + memory leak)
+        rm /etc/munin/plugins/diskstats || true
       '';
       serviceConfig = {
-        ExecStart = "${pkgs.munin}/sbin/munin-node --config ${nodeConf} --servicedir /etc/munin/plugins/";
+        ExecStart = "${pkgs.munin}/sbin/munin-node --config ${nodeConf} --servicedir /etc/munin/plugins/ --sconfdir=${pluginConfDir}";
       };
     };
 
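A brief sketch of a node configuration that exercises the reworked plugin setup; the allow directive is a hypothetical munin-node.conf line, not taken from the patch:

  services.munin-node = {
    enable = true;
    extraConfig = ''
      allow ^127\.0\.0\.1$
    '';
  };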
diff --git a/nixos/modules/services/monitoring/netdata.nix b/nixos/modules/services/monitoring/netdata.nix
index e1fde4fc9500..d23b329eeb25 100644
--- a/nixos/modules/services/monitoring/netdata.nix
+++ b/nixos/modules/services/monitoring/netdata.nix
@@ -5,18 +5,25 @@ with lib;
 let
   cfg = config.services.netdata;
 
-  configFile = pkgs.writeText "netdata.conf" cfg.configText;
+  wrappedPlugins = pkgs.runCommand "wrapped-plugins" {} ''
+    mkdir -p $out/libexec/netdata/plugins.d
+    ln -s /run/wrappers/bin/apps.plugin $out/libexec/netdata/plugins.d/apps.plugin
+  '';
+
+  localConfig = {
+    global = {
+      "plugins directory" = "${wrappedPlugins}/libexec/netdata/plugins.d ${pkgs.netdata}/libexec/netdata/plugins.d";
+    };
+  };
+  mkConfig = generators.toINI {} (recursiveUpdate localConfig cfg.config);
+  configFile = pkgs.writeText "netdata.conf" (if cfg.configText != null then cfg.configText else mkConfig);
 
   defaultUser = "netdata";
 
 in {
   options = {
     services.netdata = {
-      enable = mkOption {
-        default = false;
-        type = types.bool;
-        description = "Whether to enable netdata monitoring.";
-      };
+      enable = mkEnableOption "netdata";
 
       user = mkOption {
         type = types.str;
@@ -31,9 +38,9 @@ in {
       };
 
       configText = mkOption {
-        type = types.lines;
-        default = "";
-        description = "netdata.conf configuration.";
+        type = types.nullOr types.lines;
+        description = "Verbatim netdata.conf, cannot be combined with config.";
+        default = null;
         example = ''
           [global]
           debug log = syslog
@@ -42,11 +49,29 @@ in {
         '';
       };
 
+      config = mkOption {
+        type = types.attrsOf types.attrs;
+        default = {};
+        description = "netdata.conf configuration as nix attributes. cannot be combined with configText.";
+        example = literalExample ''
+          global = {
+            "debug log" = "syslog";
+            "access log" = "syslog";
+            "error log" = "syslog";
+          };
+        '';
+        };
+      };
     };
-  };
 
   config = mkIf cfg.enable {
+    assertions =
+      [ { assertion = cfg.config != {} -> cfg.configText == null ;
+          message = "Cannot specify both config and configText";
+        }
+      ];
     systemd.services.netdata = {
+      path = with pkgs; [ gawk curl ];
       description = "Real time performance monitoring";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
@@ -66,6 +91,15 @@ in {
       };
     };
 
+    security.wrappers."apps.plugin" = {
+      source = "${pkgs.netdata}/libexec/netdata/plugins.d/apps.plugin";
+      capabilities = "cap_dac_read_search,cap_sys_ptrace+ep";
+      owner = cfg.user;
+      group = cfg.group;
+      permissions = "u+rx,g+rx,o-rwx";
+    };
+
+
     users.extraUsers = optional (cfg.user == defaultUser) {
       name = defaultUser;
     };
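The new structured config option in use, mirroring the literalExample above:

  services.netdata = {
    enable = true;
    config = {
      global = {
        "debug log" = "syslog";
        "access log" = "syslog";
        "error log" = "syslog";
      };
    };
  };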
diff --git a/nixos/modules/services/monitoring/prometheus/alertmanager.nix b/nixos/modules/services/monitoring/prometheus/alertmanager.nix
index cf761edad926..8a47c9f1e7d8 100644
--- a/nixos/modules/services/monitoring/prometheus/alertmanager.nix
+++ b/nixos/modules/services/monitoring/prometheus/alertmanager.nix
@@ -111,11 +111,11 @@ in {
       after    = [ "network.target" ];
       script = ''
         ${pkgs.prometheus-alertmanager.bin}/bin/alertmanager \
-        -config.file ${alertmanagerYml} \
-        -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
-        -log.level ${cfg.logLevel} \
-        ${optionalString (cfg.webExternalUrl != null) ''-web.external-url ${cfg.webExternalUrl} \''}
-        ${optionalString (cfg.logFormat != null) "-log.format ${cfg.logFormat}"}
+        --config.file ${alertmanagerYml} \
+        --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+        --log.level ${cfg.logLevel} \
+        ${optionalString (cfg.webExternalUrl != null) ''--web.external-url ${cfg.webExternalUrl} \''}
+        ${optionalString (cfg.logFormat != null) "--log.format ${cfg.logFormat}"}
       '';
 
       serviceConfig = {
diff --git a/nixos/modules/services/monitoring/prometheus/blackbox-exporter.nix b/nixos/modules/services/monitoring/prometheus/blackbox-exporter.nix
deleted file mode 100644
index ce2e1cf2d74b..000000000000
--- a/nixos/modules/services/monitoring/prometheus/blackbox-exporter.nix
+++ /dev/null
@@ -1,68 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
-  cfg = config.services.prometheus.blackboxExporter;
-in {
-  options = {
-    services.prometheus.blackboxExporter = {
-      enable = mkEnableOption "prometheus blackbox exporter";
-
-      configFile = mkOption {
-        type = types.path;
-        description = ''
-          Path to configuration file.
-        '';
-      };
-
-      port = mkOption {
-        type = types.int;
-        default = 9115;
-        description = ''
-          Port to listen on.
-        '';
-      };
-
-      extraFlags = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = ''
-          Extra commandline options when launching the blackbox exporter.
-        '';
-      };
-
-      openFirewall = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Open port in firewall for incoming connections.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
-
-    systemd.services.prometheus-blackbox-exporter = {
-      description = "Prometheus exporter for blackbox probes";
-      unitConfig.Documentation = "https://github.com/prometheus/blackbox_exporter";
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig = {
-        User = "nobody";
-        Restart = "always";
-        PrivateTmp = true;
-        WorkingDirectory = /tmp;
-        AmbientCapabilities = [ "CAP_NET_RAW" ]; # for ping probes
-        ExecStart = ''
-          ${pkgs.prometheus-blackbox-exporter}/bin/blackbox_exporter \
-            --web.listen-address :${toString cfg.port} \
-            --config.file ${cfg.configFile} \
-            ${concatStringsSep " \\\n  " cfg.extraFlags}
-        '';
-        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/monitoring/prometheus/collectd-exporter.nix b/nixos/modules/services/monitoring/prometheus/collectd-exporter.nix
deleted file mode 100644
index f8a5b9576a11..000000000000
--- a/nixos/modules/services/monitoring/prometheus/collectd-exporter.nix
+++ /dev/null
@@ -1,128 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
-  cfg = config.services.prometheus.collectdExporter;
-
-  collectSettingsArgs = if (cfg.collectdBinary.enable) then ''
-    -collectd.listen-address ${optionalString (cfg.collectdBinary.listenAddress != null) cfg.collectdBinary.listenAddress}:${toString cfg.collectdBinary.port} \
-    -collectd.security-level ${cfg.collectdBinary.securityLevel} \
-  '' else "";
-
-in {
-  options = {
-    services.prometheus.collectdExporter = {
-      enable = mkEnableOption "prometheus collectd exporter";
-
-      port = mkOption {
-        type = types.int;
-        default = 9103;
-        description = ''
-          Port to listen on.
-          This is used for scraping as well as the to receive collectd data via the write_http plugin.
-        '';
-      };
-
-      listenAddress = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        example = "0.0.0.0";
-        description = ''
-          Address to listen on for web interface, telemetry and collectd JSON data.
-        '';
-      };
-
-      collectdBinary = {
-        enable = mkEnableOption "collectd binary protocol receiver";
-
-        authFile = mkOption {
-          default = null;
-          type = types.nullOr types.path;
-          description = "File mapping user names to pre-shared keys (passwords).";
-        };
-
-        port = mkOption {
-          type = types.int;
-          default = 25826;
-          description = ''Network address on which to accept collectd binary network packets.'';
-        };
-
-        listenAddress = mkOption {
-          type = types.nullOr types.str;
-          default = null;
-          example = "0.0.0.0";
-          description = ''
-            Address to listen on for binary network packets.
-            '';
-        };
-
-        securityLevel = mkOption {
-          type = types.enum ["None" "Sign" "Encrypt"];
-          default = "None";
-          description = ''
-            Minimum required security level for accepted packets.
-            '';
-        };
-      };
-
-      extraFlags = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = ''
-          Extra commandline options when launching the collectd exporter.
-        '';
-      };
-
-      logFormat = mkOption {
-        type = types.str;
-        default = "logger:stderr";
-        example = "logger:syslog?appname=bob&local=7 or logger:stdout?json=true";
-        description = ''
-          Set the log target and format.
-        '';
-      };
-
-      logLevel = mkOption {
-        type = types.enum ["debug" "info" "warn" "error" "fatal"];
-        default = "info";
-        description = ''
-          Only log messages with the given severity or above.
-        '';
-      };
-
-      openFirewall = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Open port in firewall for incoming connections.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    networking.firewall.allowedTCPPorts = (optional cfg.openFirewall cfg.port) ++
-      (optional (cfg.openFirewall && cfg.collectdBinary.enable) cfg.collectdBinary.port);
-
-    systemd.services.prometheus-collectd-exporter = {
-      description = "Prometheus exporter for Collectd metrics";
-      unitConfig.Documentation = "https://github.com/prometheus/collectd_exporter";
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig = {
-        DynamicUser = true;
-        Restart = "always";
-        PrivateTmp = true;
-        WorkingDirectory = /tmp;
-        ExecStart = ''
-          ${pkgs.prometheus-collectd-exporter}/bin/collectd_exporter \
-            -log.format ${cfg.logFormat} \
-            -log.level ${cfg.logLevel} \
-            -web.listen-address ${optionalString (cfg.listenAddress != null) cfg.listenAddress}:${toString cfg.port} \
-            ${collectSettingsArgs} \
-            ${concatStringsSep " " cfg.extraFlags}
-        '';
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
new file mode 100644
index 000000000000..180b273d177e
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -0,0 +1,173 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters;
+
+  # each attribute in `exporterOpts` is expected to have specified:
+  #   - port        (types.int):   port on which the exporter listens
+  #   - serviceOpts (types.attrs): config that is merged with the
+  #                                default definition of the exporter's
+  #                                systemd service
+  #   - extraOpts   (types.attrs): extra configuration options to
+  #                                configure the exporter with, which
+  #                                are appended to the default options
+  #
+  #  Note that `extraOpts` is optional, but a script for the exporter's
+  #  systemd service must be provided by specifying either
+  #  `serviceOpts.script` or `serviceOpts.serviceConfig.ExecStart`
+  exporterOpts = {
+    blackbox = import ./exporters/blackbox.nix { inherit config lib pkgs; };
+    collectd = import ./exporters/collectd.nix { inherit config lib pkgs; };
+    dovecot  = import ./exporters/dovecot.nix  { inherit config lib pkgs; };
+    fritzbox = import ./exporters/fritzbox.nix { inherit config lib pkgs; };
+    json     = import ./exporters/json.nix     { inherit config lib pkgs; };
+    minio    = import ./exporters/minio.nix    { inherit config lib pkgs; };
+    nginx    = import ./exporters/nginx.nix    { inherit config lib pkgs; };
+    node     = import ./exporters/node.nix     { inherit config lib pkgs; };
+    postfix  = import ./exporters/postfix.nix  { inherit config lib pkgs; };
+    snmp     = import ./exporters/snmp.nix     { inherit config lib pkgs; };
+    unifi    = import ./exporters/unifi.nix    { inherit config lib pkgs; };
+    varnish  = import ./exporters/varnish.nix  { inherit config lib pkgs; };
+  };
+
+  mkExporterOpts = ({ name, port }: {
+    enable = mkEnableOption "the prometheus ${name} exporter";
+    port = mkOption {
+      type = types.int;
+      default = port;
+      description = ''
+        Port to listen on.
+      '';
+    };
+    listenAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = ''
+        Address to listen on.
+      '';
+    };
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = ''
+        Extra commandline options to pass to the ${name} exporter.
+      '';
+    };
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Open port in firewall for incoming connections.
+      '';
+    };
+    firewallFilter = mkOption {
+      type = types.str;
+      default = "-p tcp -m tcp --dport ${toString port}";
+      example = literalExample ''
+        "-i eth0 -p tcp -m tcp --dport ${toString port}"
+      '';
+      description = ''
+        Specify a filter for iptables to use when
+        <option>services.prometheus.exporters.${name}.openFirewall</option>
+        is true. It is used as `ip46tables -I INPUT <option>firewallFilter</option> -j ACCEPT`.
+      '';
+    };
+    user = mkOption {
+      type = types.str;
+      default = "nobody";
+      description = ''
+        User name under which the ${name} exporter shall be run.
+        Has no effect when <option>systemd.services.prometheus-${name}-exporter.serviceConfig.DynamicUser</option> is true.
+      '';
+    };
+    group = mkOption {
+      type = types.str;
+      default = "nobody";
+      description = ''
+        Group under which the ${name} exporter shall be run.
+        Has no effect when <option>systemd.services.prometheus-${name}-exporter.serviceConfig.DynamicUser</option> is true.
+      '';
+    };
+  });
+
+  mkSubModule = { name, port, extraOpts, serviceOpts }: {
+    ${name} = mkOption {
+      type = types.submodule {
+        options = (mkExporterOpts {
+          inherit name port;
+        } // extraOpts);
+      };
+      internal = true;
+      default = {};
+    };
+  };
+
+  mkSubModules = (foldl' (a: b: a//b) {}
+    (mapAttrsToList (name: opts: mkSubModule {
+      inherit name;
+      inherit (opts) port serviceOpts;
+      extraOpts = opts.extraOpts or {};
+    }) exporterOpts)
+  );
+
+  mkExporterConf = { name, conf, serviceOpts }:
+    mkIf conf.enable {
+      networking.firewall.extraCommands = mkIf conf.openFirewall ''
+        ip46tables -I INPUT ${conf.firewallFilter} -j ACCEPT
+      '';
+      systemd.services."prometheus-${name}-exporter" = mkMerge ([{
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig = {
+          Restart = mkDefault "always";
+          PrivateTmp = mkDefault true;
+          WorkingDirectory = mkDefault /tmp;
+        } // mkIf (!(serviceOpts.serviceConfig.DynamicUser or false)) {
+          User = conf.user;
+          Group = conf.group;
+        };
+      } serviceOpts ]);
+  };
+in
+{
+  options.services.prometheus.exporters = mkOption {
+    type = types.submodule {
+      options = (mkSubModules);
+    };
+    description = "Prometheus exporter configuration";
+    default = {};
+    example = literalExample ''
+      {
+        node = {
+          enable = true;
+          enabledCollectors = [ "systemd" ];
+        };
+        varnish.enable = true;
+      }
+    '';
+  };
+
+  config = mkMerge ([{
+    assertions = [{
+      assertion = (cfg.snmp.configurationPath == null) != (cfg.snmp.configuration == null);
+      message = ''
+        Please ensure you have either `services.prometheus.exporters.snmp.configuration'
+          or `services.prometheus.exporters.snmp.configurationPath' set!
+      '';
+    }];
+  }] ++ [(mkIf config.services.minio.enable {
+    services.prometheus.exporters.minio.minioAddress  = mkDefault "http://localhost:9000";
+    services.prometheus.exporters.minio.minioAccessKey = mkDefault config.services.minio.accessKey;
+    services.prometheus.exporters.minio.minioAccessSecret = mkDefault config.services.minio.secretKey;
+  })] ++ (mapAttrsToList (name: conf:
+    mkExporterConf {
+      inherit name;
+      inherit (conf) serviceOpts;
+      conf = cfg.${name};
+    }) exporterOpts)
+  );
+
+  meta.doc = ./exporters.xml;
+}
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.xml b/nixos/modules/services/monitoring/prometheus/exporters.xml
new file mode 100644
index 000000000000..4f0bcb298106
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters.xml
@@ -0,0 +1,135 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="module-services-prometheus-exporters">
+
+<title>Prometheus exporters</title>
+
+<para>Prometheus exporters provide metrics for the <link xlink:href="https://prometheus.io">prometheus monitoring system</link>.</para>
+
+<section><title>Configuration</title>
+  <para>One of the most common exporters is the <link xlink:href="https://github.com/prometheus/node_exporter">node exporter</link>, which provides hardware and OS metrics from the host it is running on. The exporter can be configured as follows:
+<programlisting>
+  services.prometheus.exporters.node = {
+    enable = true;
+    enabledCollectors = [
+      "logind"
+      "systemd"
+    ];
+    disabledCollectors = [
+      "textfile"
+    ];
+    openFirewall = true;
+    firewallFilter = "-i br0 -p tcp -m tcp --dport 9100";
+  };
+</programlisting>
+It should now serve all metrics from the collectors
+that are explicitly enabled and the ones that are
+<link xlink:href="https://github.com/prometheus/node_exporter#enabled-by-default">enabled by default</link>, via HTTP under <literal>/metrics</literal>. In this example the firewall only
+allows incoming connections to the exporter's port on the bridge interface <literal>br0</literal>
+(the bridge itself has to be configured separately, of course).
+For more information about the configuration see <literal>man configuration.nix</literal> or
+search through the <link xlink:href="https://nixos.org/nixos/options.html#prometheus.exporters">available options</link>.
+</para>
+</section>
+<section><title>Adding a new exporter</title>
+  <para>To add a new exporter, the exporter itself has to be packaged first (see <literal>nixpkgs/pkgs/servers/monitoring/prometheus/</literal> for examples); then a module can be added. The postfix exporter is used in this example:</para>
+<itemizedlist>
+  <listitem>
+    <para>
+      Some default options for all exporters are provided by
+      <literal>nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix</literal>:
+    </para>
+  </listitem>
+  <listitem override='none'>
+    <itemizedlist>
+      <listitem><para><literal>enable</literal></para></listitem>
+      <listitem><para><literal>port</literal></para></listitem>
+      <listitem><para><literal>listenAddress</literal></para></listitem>
+      <listitem><para><literal>extraFlags</literal></para></listitem>
+      <listitem><para><literal>openFirewall</literal></para></listitem>
+      <listitem><para><literal>firewallFilter</literal></para></listitem>
+      <listitem><para><literal>user</literal></para></listitem>
+      <listitem><para><literal>group</literal></para></listitem>
+    </itemizedlist>
+  </listitem>
+  <listitem>
+    <para>As there is already a package available, the module can now be added.
+      This is accomplished by adding a new file to the
+      <literal>nixos/modules/services/monitoring/prometheus/exporters/</literal> directory,
+      which will be called <literal>postfix.nix</literal> and contains all exporter-specific options
+      and configuration:
+      <programlisting>
+        # nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
+        { config, lib, pkgs }:
+
+        with lib;
+
+        let
+          # for convenience we define cfg here
+          cfg = config.services.prometheus.exporters.postfix;
+        in
+        {
+          port = 9154; # The postfix exporter listens on this port by default
+
+          # `extraOpts` is an attribute set which contains additional options
+          # (and optional overrides for default options).
+          # Note that this attribute is optional.
+          extraOpts = {
+            telemetryPath = mkOption {
+              type = types.str;
+              default = "/metrics";
+              description = ''
+                Path under which to expose metrics.
+              '';
+            };
+            logfilePath = mkOption {
+              type = types.path;
+              default = /var/log/postfix_exporter_input.log;
+              example = /var/log/mail.log;
+              description = ''
+                Path where Postfix writes log entries.
+                This file will be truncated by this exporter!
+              '';
+            };
+            showqPath = mkOption {
+              type = types.path;
+              default = /var/spool/postfix/public/showq;
+              example = /var/lib/postfix/queue/public/showq;
+              description = ''
+                Path at which Postfix places its showq socket.
+              '';
+            };
+          };
+
+          # `serviceOpts` is an attribute set which contains configuration
+          # for the exporter's systemd service. One of
+          # `serviceOpts.script` and `serviceOpts.serviceConfig.ExecStart`
+          # has to be specified here. This will be merged with the default
+          # service configuration.
+          serviceOpts = {
+            serviceConfig = {
+              ExecStart = ''
+                ${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
+                  --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+                  --web.telemetry-path ${cfg.telemetryPath} \
+                  ${concatStringsSep " \\\n  " cfg.extraFlags}
+              '';
+            };
+          };
+        }
+      </programlisting>
+    </para>
+  </listitem>
+  <listitem>
+    <para>
+      This should already be enough for the postfix exporter. Additionally, one could
+      now add assertions and conditional default values. This can be done in the
+      'meta-module' that combines all exporter definitions and generates the submodules:
+      <literal>nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix</literal>
+    </para>
+  </listitem>
+</itemizedlist>
+</section>
+</chapter>
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix b/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
new file mode 100644
index 000000000000..d09d1c4f3663
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.blackbox;
+in
+{
+  port = 9115;
+  extraOpts = {
+    configFile = mkOption {
+      type = types.path;
+      description = ''
+        Path to configuration file.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      AmbientCapabilities = [ "CAP_NET_RAW" ]; # for ping probes
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-blackbox-exporter}/bin/blackbox_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --config.file ${cfg.configFile} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+    };
+  };
+}
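A minimal sketch of enabling this exporter; ./blackbox.yml stands in for a blackbox_exporter probe definition file you would provide yourself:

  services.prometheus.exporters.blackbox = {
    enable = true;
    configFile = ./blackbox.yml;   # probe/module definitions for blackbox_exporter
    openFirewall = true;           # shared exporter option, combined with firewallFilter
  };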
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix b/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix
new file mode 100644
index 000000000000..0eba3527162d
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix
@@ -0,0 +1,78 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.collectd;
+in
+{
+  port = 9103;
+  extraOpts = {
+    collectdBinary = {
+      enable = mkEnableOption "collectd binary protocol receiver";
+
+      authFile = mkOption {
+        default = null;
+        type = types.nullOr types.path;
+        description = "File mapping user names to pre-shared keys (passwords).";
+      };
+
+      port = mkOption {
+        type = types.int;
+        default = 25826;
+        description = ''Network address on which to accept collectd binary network packets.'';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = ''
+          Address to listen on for binary network packets.
+          '';
+      };
+
+      securityLevel = mkOption {
+        type = types.enum ["None" "Sign" "Encrypt"];
+        default = "None";
+        description = ''
+          Minimum required security level for accepted packets.
+        '';
+      };
+    };
+
+    logFormat = mkOption {
+      type = types.str;
+      default = "logger:stderr";
+      example = "logger:syslog?appname=bob&local=7 or logger:stdout?json=true";
+      description = ''
+        Set the log target and format.
+      '';
+    };
+
+    logLevel = mkOption {
+      type = types.enum ["debug" "info" "warn" "error" "fatal"];
+      default = "info";
+      description = ''
+        Only log messages with the given severity or above.
+      '';
+    };
+  };
+  serviceOpts = let
+    collectSettingsArgs = if (cfg.collectdBinary.enable) then ''
+      -collectd.listen-address ${cfg.collectdBinary.listenAddress}:${toString cfg.collectdBinary.port} \
+      -collectd.security-level ${cfg.collectdBinary.securityLevel} \
+    '' else "";
+  in {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-collectd-exporter}/bin/collectd_exporter \
+          -log.format ${cfg.logFormat} \
+          -log.level ${cfg.logLevel} \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          ${collectSettingsArgs} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
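A sketch that also turns on the binary protocol receiver; the listen values restate the option defaults:

  services.prometheus.exporters.collectd = {
    enable = true;
    collectdBinary = {
      enable = true;               # accept collectd binary network packets
      listenAddress = "0.0.0.0";
      port = 25826;
      securityLevel = "None";
    };
  };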
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix b/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix
new file mode 100644
index 000000000000..4ca6d4e5f8b6
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.dovecot;
+in
+{
+  port = 9166;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+    socketPath = mkOption {
+      type = types.path;
+      default = "/var/run/dovecot/stats";
+      example = "/var/run/dovecot2/stats";
+      description = ''
+        Path under which the stats socket is placed.
+        The user/group under which the exporter runs
+        should be able to access the socket in order
+        to scrape the metrics successfully.
+      '';
+    };
+    scopes = mkOption {
+      type = types.listOf types.str;
+      default = [ "user" ];
+      example = [ "user" "global" ];
+      description = ''
+        Stats scopes to query.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-dovecot-exporter}/bin/dovecot_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --dovecot.socket-path ${cfg.socketPath} \
+          --dovecot.scopes ${concatStringsSep "," cfg.scopes} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
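A usage sketch with values taken from the option examples above; note that the exporter's user or group needs read access to the stats socket:

  services.prometheus.exporters.dovecot = {
    enable = true;
    socketPath = "/var/run/dovecot2/stats";
    scopes = [ "user" "global" ];
  };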
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix b/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix
new file mode 100644
index 000000000000..a3f1d9d31323
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix
@@ -0,0 +1,39 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.fritzbox;
+in
+{
+  port = 9133;
+  extraOpts = {
+    gatewayAddress = mkOption {
+      type = types.str;
+      default = "fritz.box";
+      description = ''
+        The hostname or IP of the FRITZ!Box.
+      '';
+    };
+
+    gatewayPort = mkOption {
+      type = types.int;
+      default = 49000;
+      description = ''
+        The port of the FRITZ!Box UPnP service.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-fritzbox-exporter}/bin/fritzbox_exporter \
+          -listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -gateway-address ${cfg.gatewayAddress} \
+          -gateway-port ${toString cfg.gatewayPort} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/json.nix b/nixos/modules/services/monitoring/prometheus/exporters/json.nix
new file mode 100644
index 000000000000..a5494e85e016
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/json.nix
@@ -0,0 +1,36 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.json;
+in
+{
+  port = 7979;
+  extraOpts = {
+    url = mkOption {
+      type = types.str;
+      description = ''
+        URL to scrape JSON from.
+      '';
+    };
+    configFile = mkOption {
+      type = types.path;
+      description = ''
+        Path to configuration file.
+      '';
+    };
+    listenAddress = {}; # not used
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-json-exporter}/bin/prometheus-json-exporter \
+          --port ${toString cfg.port} \
+          ${cfg.url} ${cfg.configFile} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
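A sketch with a hypothetical endpoint and mapping file (both placeholders, not defaults):

  services.prometheus.exporters.json = {
    enable = true;
    url = "http://localhost:8080/status.json";    # JSON document to scrape
    configFile = ./json-exporter-mappings.yml;    # metric mapping definitions
  };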
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/minio.nix b/nixos/modules/services/monitoring/prometheus/exporters/minio.nix
new file mode 100644
index 000000000000..3cc4ffdbc8fd
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/minio.nix
@@ -0,0 +1,65 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.minio;
+in
+{
+  port = 9290;
+  extraOpts = {
+    minioAddress = mkOption {
+      type = types.str;
+      example = "https://10.0.0.1:9000";
+      description = ''
+        The URL of the minio server.
+        Use HTTPS if Minio accepts secure connections only.
+        By default this connects to the local minio server if enabled.
+      '';
+    };
+
+    minioAccessKey = mkOption {
+      type = types.str;
+      example = "yourMinioAccessKey";
+      description = ''
+        The value of the Minio access key.
+        It is required in order to connect to the server.
+        If the local minio server is enabled, this defaults to
+        <literal>config.services.minio.accessKey</literal>.
+      '';
+    };
+
+    minioAccessSecret = mkOption {
+      type = types.str;
+      description = ''
+        The value of the Minio access secret.
+        It is required in order to connect to the server.
+        If the local minio server is enabled, this defaults to
+        <literal>config.services.minio.secretKey</literal>.
+      '';
+    };
+
+    minioBucketStats = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Collect statistics about the buckets and files in buckets.
+        This requires more computation; use it carefully with large buckets.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-minio-exporter}/bin/minio-exporter \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -minio.server ${cfg.minioAddress} \
+          -minio.access-key ${cfg.minioAccessKey} \
+          -minio.access-secret ${cfg.minioAccessSecret} \
+          ${optionalString cfg.minioBucketStats "-minio.bucket-stats"} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
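When the local minio service is enabled, the address and credentials are wired up by the mkDefault entries in exporters.nix, so a sketch can stay short:

  services.minio.enable = true;                  # exporter then targets the local server
  services.prometheus.exporters.minio = {
    enable = true;
    minioBucketStats = true;                     # per-bucket statistics (more expensive)
    # minioAddress / minioAccessKey / minioAccessSecret default to the local instance
  };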
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix b/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
new file mode 100644
index 000000000000..6a3ba2d0457c
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.nginx;
+in
+{
+  port = 9113;
+  extraOpts = {
+    scrapeUri = mkOption {
+      type = types.string;
+      default = "http://localhost/nginx_status";
+      description = ''
+        Address to access the nginx status page.
+        Can be enabled with services.nginx.statusPage = true.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-nginx-exporter}/bin/nginx_exporter \
+          -nginx.scrape_uri '${cfg.scrapeUri}' \
+          -telemetry.address ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
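As the option description notes, nginx has to expose its status page for this exporter to scrape; a sketch (scrapeUri restates the default):

  services.nginx.statusPage = true;              # serves /nginx_status
  services.prometheus.exporters.nginx = {
    enable = true;
    scrapeUri = "http://localhost/nginx_status";
  };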
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/node.nix b/nixos/modules/services/monitoring/prometheus/exporters/node.nix
new file mode 100644
index 000000000000..c85f5f9cfb2d
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/node.nix
@@ -0,0 +1,39 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.node;
+in
+{
+  port = 9100;
+  extraOpts = {
+    enabledCollectors = mkOption {
+      type = types.listOf types.string;
+      default = [];
+      example = ''[ "systemd" ]'';
+      description = ''
+        Collectors to enable. The collectors listed here are enabled in addition to the default ones.
+      '';
+    };
+    disabledCollectors = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = ''[ "timex" ]'';
+      description = ''
+        Collectors to disable which are enabled by default.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-node-exporter}/bin/node_exporter \
+          ${concatMapStringsSep " " (x: "--collector." + x) cfg.enabledCollectors} \
+          ${concatMapStringsSep " " (x: "--no-collector." + x) cfg.disabledCollectors} \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix b/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
new file mode 100644
index 000000000000..efe78ebcba86
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
@@ -0,0 +1,81 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.postfix;
+in
+{
+  port = 9154;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+    logfilePath = mkOption {
+      type = types.path;
+      default = "/var/log/postfix_exporter_input.log";
+      example = "/var/log/mail.log";
+      description = ''
+        Path where Postfix writes log entries.
+        This file will be truncated by this exporter!
+      '';
+    };
+    showqPath = mkOption {
+      type = types.path;
+      default = "/var/spool/postfix/public/showq";
+      example = "/var/lib/postfix/queue/public/showq";
+      description = ''
+        Path where Postfix places its showq socket.
+      '';
+    };
+    systemd = {
+      enable = mkEnableOption ''
+        reading metrics from the systemd-journal instead of from a logfile
+      '';
+      unit = mkOption {
+        type = types.str;
+        default = "postfix.service";
+        description = ''
+          Name of the postfix systemd unit.
+        '';
+      };
+      slice = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Name of the postfix systemd slice.
+          This overrides the <option>systemd.unit</option>.
+        '';
+      };
+      journalPath = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = ''
+          Path to the systemd journal.
+        '';
+      };
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --postfix.showq_path ${cfg.showqPath} \
+          ${concatStringsSep " \\\n  " (cfg.extraFlags
+          ++ optional cfg.systemd.enable "--systemd.enable"
+          ++ optional cfg.systemd.enable (if cfg.systemd.slice != null
+                                          then "--systemd.slice ${cfg.systemd.slice}"
+                                          else "--systemd.unit ${cfg.systemd.unit}")
+          ++ optional (cfg.systemd.enable && (cfg.systemd.journalPath != null))
+                       "--systemd.journal_path ${cfg.systemd.journalPath}"
+          ++ optional (!cfg.systemd.enable) "--postfix.logfile_path ${cfg.logfilePath}")}
+      '';
+    };
+  };
+}
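A sketch of the journal-based mode added by the systemd options:

  services.prometheus.exporters.postfix = {
    enable = true;
    systemd = {
      enable = true;               # read log entries from the journal instead of a logfile
      unit = "postfix.service";    # restates the default unit name
    };
  };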
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix b/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
new file mode 100644
index 000000000000..404cd0a1896b
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
@@ -0,0 +1,71 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.snmp;
+in
+{
+  port = 9116;
+  extraOpts = {
+    configurationPath = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = ''
+        Path to an SNMP exporter configuration file. Mutually exclusive with the 'configuration' option.
+      '';
+      example = "./snmp.yml";
+    };
+
+    configuration = mkOption {
+      type = types.nullOr types.attrs;
+      default = null;
+      description = ''
+        SNMP exporter configuration as a Nix attribute set. Mutually exclusive with the 'configurationPath' option.
+      '';
+      example = ''
+        {
+          "default" = {
+            "version" = 2;
+            "auth" = {
+              "community" = "public";
+            };
+          };
+        };
+      '';
+    };
+
+    logFormat = mkOption {
+      type = types.str;
+      default = "logger:stderr";
+      description = ''
+        Set the log target and format.
+      '';
+    };
+
+    logLevel = mkOption {
+      type = types.enum ["debug" "info" "warn" "error" "fatal"];
+      default = "info";
+      description = ''
+        Only log messages with the given severity or above.
+      '';
+    };
+  };
+  serviceOpts = let
+    configFile = if cfg.configurationPath != null
+                 then cfg.configurationPath
+                 else "${pkgs.writeText "snmp-exporter-conf.yml" (builtins.toJSON cfg.configuration)}";
+    in {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-snmp-exporter.bin}/bin/snmp_exporter \
+          -config.file ${configFile} \
+          -log.format ${cfg.logFormat} \
+          -log.level ${cfg.logLevel} \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
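A sketch using the inline configuration form (copied from the option's example); alternatively configurationPath can point at an existing snmp.yml:

  services.prometheus.exporters.snmp = {
    enable = true;
    configuration = {
      "default" = {
        "version" = 2;
        "auth" = {
          "community" = "public";
        };
      };
    };
  };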
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix b/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix
new file mode 100644
index 000000000000..011dcbe208e4
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix
@@ -0,0 +1,67 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.unifi;
+in
+{
+  port = 9130;
+  extraOpts = {
+    unifiAddress = mkOption {
+      type = types.str;
+      example = "https://10.0.0.1:8443";
+      description = ''
+        URL of the UniFi Controller API.
+      '';
+    };
+
+    unifiInsecure = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        If enabled, skip verification of the TLS certificate of the UniFi Controller API.
+        Use with caution.
+      '';
+    };
+
+    unifiUsername = mkOption {
+      type = types.str;
+      example = "ReadOnlyUser";
+      description = ''
+        Username for authentication against the UniFi Controller API.
+      '';
+    };
+
+    unifiPassword = mkOption {
+      type = types.str;
+      description = ''
+        Password for authentication against UniFi Controller API.
+      '';
+    };
+
+    unifiTimeout = mkOption {
+      type = types.str;
+      default = "5s";
+      example = "2m";
+      description = ''
+        Timeout including unit for UniFi Controller API requests.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-unifi-exporter}/bin/unifi_exporter \
+          -telemetry.addr ${cfg.listenAddress}:${toString cfg.port} \
+          -unifi.addr ${cfg.unifiAddress} \
+          -unifi.username ${cfg.unifiUsername} \
+          -unifi.password ${cfg.unifiPassword} \
+          -unifi.timeout ${cfg.unifiTimeout} \
+          ${optionalString cfg.unifiInsecure "-unifi.insecure" } \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
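A sketch with values from the option examples; the password is a placeholder for a real read-only controller account:

  services.prometheus.exporters.unifi = {
    enable = true;
    unifiAddress = "https://10.0.0.1:8443";
    unifiUsername = "ReadOnlyUser";
    unifiPassword = "changeme";
    unifiInsecure = true;          # skip TLS verification; use with caution
  };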
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix b/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
new file mode 100644
index 000000000000..b439a83e7aa2
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
@@ -0,0 +1,21 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.varnish;
+in
+{
+  port = 9131;
+  serviceOpts = {
+    path = [ pkgs.varnish ];
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-varnish-exporter}/bin/prometheus_varnish_exporter \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixos/modules/services/monitoring/prometheus/fritzbox-exporter.nix b/nixos/modules/services/monitoring/prometheus/fritzbox-exporter.nix
deleted file mode 100644
index 6da39b6519cb..000000000000
--- a/nixos/modules/services/monitoring/prometheus/fritzbox-exporter.nix
+++ /dev/null
@@ -1,76 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
-  cfg = config.services.prometheus.fritzboxExporter;
-in {
-  options = {
-    services.prometheus.fritzboxExporter = {
-      enable = mkEnableOption "prometheus fritzbox exporter";
-
-      port = mkOption {
-        type = types.int;
-        default = 9133;
-        description = ''
-          Port to listen on.
-        '';
-      };
-
-      gatewayAddress = mkOption {
-        type = types.str;
-        default = "fritz.box";
-        description = ''
-          The hostname or IP of the FRITZ!Box.
-        '';
-      };
-
-      gatewayPort = mkOption {
-        type = types.int;
-        default = 49000;
-        description = ''
-          The port of the FRITZ!Box UPnP service.
-        '';
-      };
-
-      extraFlags = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = ''
-          Extra commandline options when launching the fritzbox exporter.
-        '';
-      };
-
-      openFirewall = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Open port in firewall for incoming connections.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
-
-    systemd.services.prometheus-fritzbox-exporter = {
-      description = "Prometheus exporter for FRITZ!Box via UPnP";
-      unitConfig.Documentation = "https://github.com/ndecker/fritzbox_exporter";
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig = {
-        User = "nobody";
-        Restart = "always";
-        PrivateTmp = true;
-        WorkingDirectory = /tmp;
-        ExecStart = ''
-          ${pkgs.prometheus-fritzbox-exporter}/bin/fritzbox_exporter \
-            -listen-address :${toString cfg.port} \
-            -gateway-address ${cfg.gatewayAddress} \
-            -gateway-port ${toString cfg.gatewayPort} \
-            ${concatStringsSep " \\\n  " cfg.extraFlags}
-        '';
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/monitoring/prometheus/json-exporter.nix b/nixos/modules/services/monitoring/prometheus/json-exporter.nix
deleted file mode 100644
index 6bc56df9834b..000000000000
--- a/nixos/modules/services/monitoring/prometheus/json-exporter.nix
+++ /dev/null
@@ -1,74 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
-  cfg = config.services.prometheus.jsonExporter;
-in {
-  options = {
-    services.prometheus.jsonExporter = {
-      enable = mkEnableOption "prometheus JSON exporter";
-
-      url = mkOption {
-        type = types.str;
-        description = ''
-          URL to scrape JSON from.
-        '';
-      };
-
-      configFile = mkOption {
-        type = types.path;
-        description = ''
-          Path to configuration file.
-        '';
-      };
-
-      port = mkOption {
-        type = types.int;
-        default = 7979;
-        description = ''
-          Port to listen on.
-        '';
-      };
-
-      extraFlags = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = ''
-          Extra commandline options when launching the JSON exporter.
-        '';
-      };
-
-      openFirewall = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Open port in firewall for incoming connections.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
-
-    systemd.services.prometheus-json-exporter = {
-      description = "Prometheus exporter for JSON over HTTP";
-      unitConfig.Documentation = "https://github.com/kawamuray/prometheus-json-exporter";
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig = {
-        User = "nobody";
-        Restart = "always";
-        PrivateTmp = true;
-        WorkingDirectory = /tmp;
-        ExecStart = ''
-          ${pkgs.prometheus-json-exporter}/bin/prometheus-json-exporter \
-            --port ${toString cfg.port} \
-            ${cfg.url} ${cfg.configFile} \
-            ${concatStringsSep " \\\n  " cfg.extraFlags}
-        '';
-        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/monitoring/prometheus/minio-exporter.nix b/nixos/modules/services/monitoring/prometheus/minio-exporter.nix
deleted file mode 100644
index 4314671523cf..000000000000
--- a/nixos/modules/services/monitoring/prometheus/minio-exporter.nix
+++ /dev/null
@@ -1,117 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
-  cfg = config.services.prometheus.minioExporter;
-in {
-  options = {
-    services.prometheus.minioExporter = {
-      enable = mkEnableOption "prometheus minio exporter";
-
-      port = mkOption {
-        type = types.int;
-        default = 9290;
-        description = ''
-          Port to listen on.
-        '';
-      };
-
-      listenAddress = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        example = "0.0.0.0";
-        description = ''
-          Address to listen on for web interface and telemetry.
-        '';
-      };
-
-      minioAddress = mkOption {
-        type = types.str;
-        example = "https://10.0.0.1:9000";
-        default = if config.services.minio.enable then "http://localhost:9000" else null;
-        description = ''
-          The URL of the minio server.
-          Use HTTPS if Minio accepts secure connections only.
-          By default this connects to the local minio server if enabled.
-        '';
-      };
-
-      minioAccessKey = mkOption ({
-        type = types.str;
-        example = "BKIKJAA5BMMU2RHO6IBB";
-        description = ''
-          The value of the Minio access key.
-          It is required in order to connect to the server.
-          By default this uses the one from the local minio server if enabled
-          and <literal>config.services.minio.accessKey</literal>.
-        '';
-      } // optionalAttrs (config.services.minio.enable && config.services.minio.accessKey != "") {
-        default = config.services.minio.accessKey;
-      });
-
-      minioAccessSecret = mkOption ({
-        type = types.str;
-        description = ''
-          The calue of the Minio access secret.
-          It is required in order to connect to the server.
-          By default this uses the one from the local minio server if enabled
-          and <literal>config.services.minio.secretKey</literal>.
-        '';
-      } // optionalAttrs (config.services.minio.enable && config.services.minio.secretKey != "") {
-        default = config.services.minio.secretKey;
-      });
-
-      minioBucketStats = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Collect statistics about the buckets and files in buckets.
-          It requires more computation, use it carefully in case of large buckets..
-        '';
-      };
-
-      extraFlags = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = ''
-          Extra commandline options when launching the minio exporter.
-        '';
-      };
-
-      openFirewall = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Open port in firewall for incoming connections.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
-
-    systemd.services.prometheus-minio-exporter = {
-      description = "Prometheus exporter for Minio server metrics";
-      unitConfig.Documentation = "https://github.com/joe-pll/minio-exporter";
-      wantedBy = [ "multi-user.target" ];
-      after = optional config.services.minio.enable "minio.service";
-      serviceConfig = {
-        DynamicUser = true;
-        Restart = "always";
-        PrivateTmp = true;
-        WorkingDirectory = /tmp;
-        ExecStart = ''
-          ${pkgs.prometheus-minio-exporter}/bin/minio-exporter \
-            -web.listen-address ${optionalString (cfg.listenAddress != null) cfg.listenAddress}:${toString cfg.port} \
-            -minio.server ${cfg.minioAddress} \
-            -minio.access-key ${cfg.minioAccessKey} \
-            -minio.access-secret ${cfg.minioAccessSecret} \
-            ${optionalString cfg.minioBucketStats "-minio.bucket-stats"} \
-            ${concatStringsSep " \\\n  " cfg.extraFlags}
-        '';
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/monitoring/prometheus/nginx-exporter.nix b/nixos/modules/services/monitoring/prometheus/nginx-exporter.nix
deleted file mode 100644
index 1ccafee3b18b..000000000000
--- a/nixos/modules/services/monitoring/prometheus/nginx-exporter.nix
+++ /dev/null
@@ -1,78 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
-  cfg = config.services.prometheus.nginxExporter;
-in {
-  options = {
-    services.prometheus.nginxExporter = {
-      enable = mkEnableOption "prometheus nginx exporter";
-
-      port = mkOption {
-        type = types.int;
-        default = 9113;
-        description = ''
-          Port to listen on.
-        '';
-      };
-
-      listenAddress = mkOption {
-        type = types.string;
-        default = "0.0.0.0";
-        description = ''
-          Address to listen on.
-        '';
-      };
-
-      scrapeUri = mkOption {
-        type = types.string;
-        default = "http://localhost/nginx_status";
-        description = ''
-          Address to access the nginx status page.
-          Can be enabled with services.nginx.statusPage = true.
-        '';
-      };
-
-      extraFlags = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = ''
-          Extra commandline options when launching the nginx exporter.
-        '';
-      };
-
-      openFirewall = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Open port in firewall for incoming connections.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
-
-    systemd.services.prometheus-nginx-exporter = {
-      after = [ "network.target" "nginx.service" ];
-      description = "Prometheus exporter for nginx metrics";
-      unitConfig.Documentation = "https://github.com/discordianfish/nginx_exporter";
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig = {
-        User = "nobody";
-        Restart  = "always";
-        PrivateTmp = true;
-        WorkingDirectory = /tmp;
-        ExecStart = ''
-          ${pkgs.prometheus-nginx-exporter}/bin/nginx_exporter \
-            -nginx.scrape_uri '${cfg.scrapeUri}' \
-            -telemetry.address ${cfg.listenAddress}:${toString cfg.port} \
-            ${concatStringsSep " \\\n  " cfg.extraFlags}
-        '';
-        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/monitoring/prometheus/node-exporter.nix b/nixos/modules/services/monitoring/prometheus/node-exporter.nix
deleted file mode 100644
index bad4389ce799..000000000000
--- a/nixos/modules/services/monitoring/prometheus/node-exporter.nix
+++ /dev/null
@@ -1,87 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
-  cfg = config.services.prometheus.nodeExporter;
-in {
-  options = {
-    services.prometheus.nodeExporter = {
-      enable = mkEnableOption "prometheus node exporter";
-
-      port = mkOption {
-        type = types.int;
-        default = 9100;
-        description = ''
-          Port to listen on.
-        '';
-      };
-
-      listenAddress = mkOption {
-        type = types.string;
-        default = "0.0.0.0";
-        description = ''
-          Address to listen on.
-        '';
-      };
-
-      enabledCollectors = mkOption {
-        type = types.listOf types.string;
-        default = [];
-        example = ''[ "systemd" ]'';
-        description = ''
-          Collectors to enable. The collectors listed here are enabled in addition to the default ones.
-        '';
-      };
-
-      disabledCollectors = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        example = ''[ "timex" ]'';
-        description = ''
-          Collectors to disable which are enabled by default.
-        '';
-      };
-
-      extraFlags = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = ''
-          Extra commandline options when launching the node exporter.
-        '';
-      };
-
-      openFirewall = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Open port in firewall for incoming connections.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
-
-    systemd.services.prometheus-node-exporter = {
-      description = "Prometheus exporter for machine metrics";
-      unitConfig.Documentation = "https://github.com/prometheus/node_exporter";
-      wantedBy = [ "multi-user.target" ];
-      script = ''
-        exec ${pkgs.prometheus-node-exporter}/bin/node_exporter \
-          ${concatMapStringsSep " " (x: "--collector." + x) cfg.enabledCollectors} \
-          ${concatMapStringsSep " " (x: "--no-collector." + x) cfg.disabledCollectors} \
-          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
-          ${concatStringsSep " \\\n  " cfg.extraFlags}
-      '';
-      serviceConfig = {
-        User = "nobody";
-        Restart = "always";
-        PrivateTmp = true;
-        WorkingDirectory = /tmp;
-        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/monitoring/prometheus/snmp-exporter.nix b/nixos/modules/services/monitoring/prometheus/snmp-exporter.nix
deleted file mode 100644
index fe33f8c1f04d..000000000000
--- a/nixos/modules/services/monitoring/prometheus/snmp-exporter.nix
+++ /dev/null
@@ -1,127 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
-  cfg = config.services.prometheus.snmpExporter;
-  mkConfigFile = pkgs.writeText "snmp.yml" (if cfg.configurationPath == null then builtins.toJSON cfg.configuration else builtins.readFile cfg.configurationPath);
-in {
-  options = {
-    services.prometheus.snmpExporter = {
-      enable = mkEnableOption "Prometheus snmp exporter";
-
-      user = mkOption {
-        type = types.str;
-        default = "nobody";
-        description = ''
-          User name under which snmp exporter shall be run.
-        '';
-      };
-
-      group = mkOption {
-        type = types.str;
-        default = "nogroup";
-        description = ''
-          Group under which snmp exporter shall be run.
-        '';
-      };
-
-      port = mkOption {
-        type = types.int;
-        default = 9116;
-        description = ''
-          Port to listen on.
-        '';
-      };
-
-      listenAddress = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        description = ''
-          Address to listen on for web interface and telemetry.
-        '';
-      };
-
-      configurationPath = mkOption {
-        type = types.nullOr types.path;
-        default = null;
-        description = ''
-          Path to a snmp exporter configuration file. Mutually exclusive with 'configuration' option.
-        '';
-        example = "./snmp.yml";
-      };
-
-      configuration = mkOption {
-        type = types.nullOr types.attrs;
-        default = {};
-        description = ''
-          Snmp exporter configuration as nix attribute set. Mutually exclusive with 'configurationPath' option.
-        '';
-        example = ''
-          {
-            "default" = {
-              "version" = 2;
-              "auth" = {
-                "community" = "public";
-              };
-            };
-          };
-        '';
-      };
-
-      logFormat = mkOption {
-        type = types.str;
-        default = "logger:stderr";
-        description = ''
-          Set the log target and format.
-        '';
-      };
-
-      logLevel = mkOption {
-        type = types.enum ["debug" "info" "warn" "error" "fatal"];
-        default = "info";
-        description = ''
-          Only log messages with the given severity or above.
-        '';
-      };
-
-      openFirewall = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Open port in firewall for incoming connections.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
-
-    assertions = singleton
-      {
-        assertion = (cfg.configurationPath == null) != (cfg.configuration == null);
-        message = "Please ensure you have either 'configuration' or 'configurationPath' set!";
-      };
-
-    systemd.services.prometheus-snmp-exporter = {
-      wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" ];
-      script = ''
-        ${pkgs.prometheus-snmp-exporter.bin}/bin/snmp_exporter \
-          -config.file ${mkConfigFile} \
-          -log.format ${cfg.logFormat} \
-          -log.level ${cfg.logLevel} \
-          -web.listen-address ${optionalString (cfg.listenAddress != null) cfg.listenAddress}:${toString cfg.port}
-      '';
-
-      serviceConfig = {
-        User = cfg.user;
-        Group = cfg.group;
-        Restart  = "always";
-        PrivateTmp = true;
-        WorkingDirectory = "/tmp";
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/monitoring/prometheus/unifi-exporter.nix b/nixos/modules/services/monitoring/prometheus/unifi-exporter.nix
deleted file mode 100644
index 0a56d6ae95a5..000000000000
--- a/nixos/modules/services/monitoring/prometheus/unifi-exporter.nix
+++ /dev/null
@@ -1,105 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
-  cfg = config.services.prometheus.unifiExporter;
-in {
-  options = {
-    services.prometheus.unifiExporter = {
-      enable = mkEnableOption "prometheus unifi exporter";
-
-      port = mkOption {
-        type = types.int;
-        default = 9130;
-        description = ''
-          Port to listen on.
-        '';
-      };
-
-      unifiAddress = mkOption {
-        type = types.str;
-        example = "https://10.0.0.1:8443";
-        description = ''
-          URL of the UniFi Controller API.
-        '';
-      };
-
-      unifiInsecure = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          If enabled skip the verification of the TLS certificate of the UniFi Controller API.
-          Use with caution.
-        '';
-      };
-      
-      unifiUsername = mkOption {
-        type = types.str;
-        example = "ReadOnlyUser";
-        description = ''
-          username for authentication against UniFi Controller API.
-        '';
-      };
-      
-      unifiPassword = mkOption {
-        type = types.str;
-        description = ''
-          Password for authentication against UniFi Controller API.
-        '';
-      };
-      
-      unifiTimeout = mkOption {
-        type = types.str;
-        default = "5s";
-        example = "2m";
-        description = ''
-          Timeout including unit for UniFi Controller API requests.
-        '';
-      };
-
-      extraFlags = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = ''
-          Extra commandline options when launching the unifi exporter.
-        '';
-      };
-
-      openFirewall = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Open port in firewall for incoming connections.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
-
-    systemd.services.prometheus-unifi-exporter = {
-      description = "Prometheus exporter for UniFi Controller metrics";
-      unitConfig.Documentation = "https://github.com/mdlayher/unifi_exporter";
-      wantedBy = [ "multi-user.target" ];
-      after = optional config.services.unifi.enable "unifi.service";
-      serviceConfig = {
-        User = "nobody";
-        Restart = "always";
-        PrivateTmp = true;
-        WorkingDirectory = /tmp;
-        ExecStart = ''
-          ${pkgs.prometheus-unifi-exporter}/bin/unifi_exporter \
-            -telemetry.addr :${toString cfg.port} \
-            -unifi.addr ${cfg.unifiAddress} \
-            -unifi.username ${cfg.unifiUsername} \
-            -unifi.password ${cfg.unifiPassword} \
-            -unifi.timeout ${cfg.unifiTimeout} \
-            ${optionalString cfg.unifiInsecure "-unifi.insecure" } \
-            ${concatStringsSep " \\\n  " cfg.extraFlags}
-        '';
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/monitoring/prometheus/varnish-exporter.nix b/nixos/modules/services/monitoring/prometheus/varnish-exporter.nix
deleted file mode 100644
index 143ebb62aeac..000000000000
--- a/nixos/modules/services/monitoring/prometheus/varnish-exporter.nix
+++ /dev/null
@@ -1,61 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-# Shamelessly cribbed from nginx-exporter.nix. ~ C.
-with lib;
-
-let
-  cfg = config.services.prometheus.varnishExporter;
-in {
-  options = {
-    services.prometheus.varnishExporter = {
-      enable = mkEnableOption "prometheus Varnish exporter";
-
-      port = mkOption {
-        type = types.int;
-        default = 9131;
-        description = ''
-          Port to listen on.
-        '';
-      };
-
-      extraFlags = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = ''
-          Extra commandline options when launching the Varnish exporter.
-        '';
-      };
-
-      openFirewall = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Open port in firewall for incoming connections.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
-
-    systemd.services.prometheus-varnish-exporter = {
-      description = "Prometheus exporter for Varnish metrics";
-      unitConfig.Documentation = "https://github.com/jonnenauha/prometheus_varnish_exporter";
-      wantedBy = [ "multi-user.target" ];
-      path = [ pkgs.varnish ];
-      script = ''
-        exec ${pkgs.prometheus-varnish-exporter}/bin/prometheus_varnish_exporter \
-          -web.listen-address :${toString cfg.port} \
-          ${concatStringsSep " \\\n  " cfg.extraFlags}
-      '';
-      serviceConfig = {
-        User = "nobody";
-        Restart = "always";
-        PrivateTmp = true;
-        WorkingDirectory = /tmp;
-        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/monitoring/smartd.nix b/nixos/modules/services/monitoring/smartd.nix
index 1d6940c516a9..fecae4ca1b36 100644
--- a/nixos/modules/services/monitoring/smartd.nix
+++ b/nixos/modules/services/monitoring/smartd.nix
@@ -14,7 +14,7 @@ let
   nx = cfg.notifications.x11;
 
   smartdNotify = pkgs.writeScript "smartd-notify.sh" ''
-    #! ${pkgs.stdenv.shell}
+    #! ${pkgs.runtimeShell}
     ${optionalString nm.enable ''
       {
       ${pkgs.coreutils}/bin/cat << EOF
@@ -64,7 +64,7 @@ let
        "DEVICESCAN ${notifyOpts}${cfg.defaults.autodetected}"}
   '';
 
-  smartdOpts = { name, ... }: {
+  smartdDeviceOpts = { name, ... }: {
 
     options = {
 
@@ -108,6 +108,18 @@ in
         '';
       };
 
+      extraOptions = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example = ["-A /var/log/smartd/" "--interval=3600"];
+        description = ''
+          Extra command-line options passed to the <literal>smartd</literal>
+          daemon on startup.
+
+          (See <literal>man 8 smartd</literal>.)
+        '';
+      };
+
       notifications = {
 
         mail = {
@@ -197,7 +209,7 @@ in
       devices = mkOption {
         default = [];
         example = [ { device = "/dev/sda"; } { device = "/dev/sdb"; options = "-d sat"; } ];
-        type = with types; listOf (submodule smartdOpts);
+        type = with types; listOf (submodule smartdDeviceOpts);
         description = "List of devices to monitor.";
       };
 
@@ -222,7 +234,7 @@ in
 
       path = [ pkgs.nettools ]; # for hostname and dnsdomanname calls in smartd
 
-      serviceConfig.ExecStart = "${pkgs.smartmontools}/sbin/smartd --no-fork --configfile=${smartdConf}";
+      serviceConfig.ExecStart = "${pkgs.smartmontools}/sbin/smartd ${lib.concatStringsSep " " cfg.extraOptions} --no-fork --configfile=${smartdConf}";
     };
 
   };
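
A short sketch of the new extraOptions option, reusing the example values from the option declaration; the flags are passed verbatim to smartd ahead of --no-fork:

    services.smartd = {
      enable = true;
      devices = [ { device = "/dev/sda"; } ];
      extraOptions = [ "-A /var/log/smartd/" "--interval=3600" ];
    };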
diff --git a/nixos/modules/services/monitoring/statsd.nix b/nixos/modules/services/monitoring/statsd.nix
index df2adb9f2766..7b0e9981cbb1 100644
--- a/nixos/modules/services/monitoring/statsd.nix
+++ b/nixos/modules/services/monitoring/statsd.nix
@@ -9,6 +9,12 @@ let
   isBuiltinBackend = name:
     builtins.elem name [ "graphite" "console" "repeater" ];
 
+  backendsToPackages = let
+    mkMap = list: name:
+      if isBuiltinBackend name then list
+      else list ++ [ pkgs.nodePackages.${name} ];
+  in foldl mkMap [];
+
   configFile = pkgs.writeText "statsd.conf" ''
     {
       address: "${cfg.listenAddress}",
@@ -27,13 +33,21 @@ let
         prettyprint: false
       },
       log: {
-        backend: "syslog"
+        backend: "stdout"
       },
       automaticConfigReload: false${optionalString (cfg.extraConfig != null) ","}
       ${cfg.extraConfig}
     }
   '';
 
+  deps = pkgs.buildEnv {
+    name = "statsd-runtime-deps";
+    pathsToLink = [ "/lib" ];
+    ignoreCollisions = true;
+
+    paths = backendsToPackages cfg.backends;
+  };
+
 in
 
 {
@@ -42,11 +56,7 @@ in
 
   options.services.statsd = {
 
-    enable = mkOption {
-      description = "Whether to enable statsd stats aggregation service";
-      default = false;
-      type = types.bool;
-    };
+    enable = mkEnableOption "statsd";
 
     listenAddress = mkOption {
       description = "Address that statsd listens on over UDP";
@@ -110,6 +120,11 @@ in
 
   config = mkIf cfg.enable {
 
+    assertions = map (backend: {
+      assertion = !isBuiltinBackend backend -> hasAttrByPath [ backend ] pkgs.nodePackages;
+      message = "Only builtin backends (graphite, console, repeater) or backends enumerated in `pkgs.nodePackages` are allowed!";
+    }) cfg.backends;
+
     users.extraUsers = singleton {
       name = "statsd";
       uid = config.ids.uids.statsd;
@@ -120,9 +135,7 @@ in
       description = "Statsd Server";
       wantedBy = [ "multi-user.target" ];
       environment = {
-        NODE_PATH=concatMapStringsSep ":"
-          (pkg: "${builtins.getAttr pkg pkgs.statsd.nodePackages}/lib/node_modules")
-          (filter (name: !isBuiltinBackend name) cfg.backends);
+        NODE_PATH = "${deps}/lib/node_modules";
       };
       serviceConfig = {
         ExecStart = "${pkgs.statsd}/bin/statsd ${configFile}";
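
With the NODE_PATH change above, a backend name is now looked up in pkgs.nodePackages unless it is one of the builtins; a minimal sketch using only the builtin graphite backend:

    services.statsd = {
      enable = true;
      # "graphite" is builtin; any other entry must match an attribute in pkgs.nodePackages,
      # otherwise the assertion added above aborts evaluation.
      backends = [ "graphite" ];
    };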
diff --git a/nixos/modules/services/network-filesystems/beegfs.nix b/nixos/modules/services/network-filesystems/beegfs.nix
new file mode 100644
index 000000000000..a6a2ec6cbc36
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/beegfs.nix
@@ -0,0 +1,343 @@
+{ config, lib, pkgs, ...} :
+
+with lib;
+
+let
+  cfg = config.services.beegfs;
+
+  # functions for the generation of config files
+
+  configMgmtd = name: cfg: pkgs.writeText "mgmt-${name}.conf" ''
+    storeMgmtdDirectory = ${cfg.mgmtd.storeDir}
+    storeAllowFirstRunInit = false
+    connAuthFile = ${cfg.connAuthFile}
+    connPortShift = ${toString cfg.connPortShift}
+
+    ${cfg.mgmtd.extraConfig}
+  '';
+
+  configAdmon = name: cfg: pkgs.writeText "admon-${name}.conf" ''
+    sysMgmtdHost = ${cfg.mgmtdHost}
+    connAuthFile = ${cfg.connAuthFile}
+    connPortShift = ${toString cfg.connPortShift}
+
+    ${cfg.admon.extraConfig}
+  '';
+
+  configMeta = name: cfg: pkgs.writeText "meta-${name}.conf" ''
+    storeMetaDirectory = ${cfg.meta.storeDir}
+    sysMgmtdHost = ${cfg.mgmtdHost}
+    connAuthFile = ${cfg.connAuthFile}
+    connPortShift = ${toString cfg.connPortShift}
+    storeAllowFirstRunInit = false
+
+    ${cfg.meta.extraConfig}
+  '';
+
+  configStorage = name: cfg: pkgs.writeText "storage-${name}.conf" ''
+    storeStorageDirectory = ${cfg.storage.storeDir}
+    sysMgmtdHost = ${cfg.mgmtdHost}
+    connAuthFile = ${cfg.connAuthFile}
+    connPortShift = ${toString cfg.connPortShift}
+    storeAllowFirstRunInit = false
+
+    ${cfg.storage.extraConfig}
+  '';
+
+  configHelperd = name: cfg: pkgs.writeText "helperd-${name}.conf" ''
+    connAuthFile = ${cfg.connAuthFile}
+    ${cfg.helperd.extraConfig}
+  '';
+
+  configClientFilename = name : "/etc/beegfs/client-${name}.conf";
+
+  configClient = name: cfg: ''
+    sysMgmtdHost = ${cfg.mgmtdHost}
+    connAuthFile = ${cfg.connAuthFile}
+    connPortShift = ${toString cfg.connPortShift}
+
+    ${cfg.client.extraConfig}
+  '';
+
+  serviceList = [
+    { service = "admon"; cfgFile = configAdmon; }
+    { service = "meta"; cfgFile = configMeta; }
+    { service = "mgmtd"; cfgFile = configMgmtd; }
+    { service = "storage"; cfgFile = configStorage; }
+  ];
+
+  # functions to generate systemd.service entries
+
+  systemdEntry = service: cfgFile: (mapAttrs' ( name: cfg:
+    (nameValuePair "beegfs-${service}-${name}" (mkIf cfg."${service}".enable {
+    wantedBy = [ "multi-user.target" ];
+    requires = [ "network-online.target" ];
+    after = [ "network-online.target" ];
+    serviceConfig = rec {
+      ExecStart = ''
+        ${pkgs.beegfs}/bin/beegfs-${service} \
+          cfgFile=${cfgFile name cfg} \
+          pidFile=${PIDFile}
+      '';
+      PIDFile = "/run/beegfs-${service}-${name}.pid";
+      TimeoutStopSec = "300";
+    };
+  }))) cfg);
+
+  systemdHelperd =  mapAttrs' ( name: cfg:
+    (nameValuePair "beegfs-helperd-${name}" (mkIf cfg.client.enable {
+    wantedBy = [ "multi-user.target" ];
+    requires = [ "network-online.target" ];
+    after = [ "network-online.target" ];
+    serviceConfig = rec {
+      ExecStart = ''
+        ${pkgs.beegfs}/bin/beegfs-helperd \
+          cfgFile=${configHelperd name cfg} \
+          pidFile=${PIDFile}
+      '';
+      PIDFile = "/run/beegfs-helperd-${name}.pid";
+      TimeoutStopSec = "300";
+    };
+   }))) cfg;
+
+  # Wrappers around the BeeGFS tools that avoid having to type the config file path
+  utilWrappers = mapAttrsToList ( name: cfg:
+      ( pkgs.runCommand "beegfs-utils-${name}" { nativeBuildInputs = [ pkgs.makeWrapper ]; } ''
+        mkdir -p $out/bin
+
+        makeWrapper ${pkgs.beegfs}/bin/beegfs-check-servers \
+                    $out/bin/beegfs-check-servers-${name} \
+                    --add-flags "-c ${configClientFilename name}" \
+                    --prefix PATH : ${lib.makeBinPath [ pkgs.beegfs ]}
+
+        makeWrapper ${pkgs.beegfs}/bin/beegfs-ctl \
+                    $out/bin/beegfs-ctl-${name} \
+                    --add-flags "--cfgFile=${configClientFilename name}"
+
+        makeWrapper ${pkgs.beegfs}/bin/beegfs-ctl \
+                    $out/bin/beegfs-df-${name} \
+                    --add-flags "--cfgFile=${configClientFilename name}" \
+                    --add-flags --listtargets  \
+                    --add-flags --hidenodeid \
+                    --add-flags --pools \
+                    --add-flags --spaceinfo
+
+        makeWrapper ${pkgs.beegfs}/bin/beegfs-fsck \
+                    $out/bin/beegfs-fsck-${name} \
+                    --add-flags "--cfgFile=${configClientFilename name}"
+      ''
+     )) cfg;
+in
+{
+  ###### interface
+
+  options = {
+    services.beegfsEnable = mkEnableOption "BeeGFS";
+
+    services.beegfs = mkOption {
+      default = {};
+      description = ''
+        BeeGFS configurations. Every mount point requires a separate configuration.
+      '';
+      type = with types; attrsOf (submodule ({ config, ... } : {
+        options = {
+          mgmtdHost = mkOption {
+            type = types.str;
+            default = null;
+            example = "master";
+            description = ''Hostname of management host.'';
+          };
+
+          connAuthFile = mkOption {
+            type = types.str;
+            default = "";
+            example = "/etc/my.key";
+            description = "File containing shared secret authentication.";
+          };
+
+          connPortShift = mkOption {
+            type = types.int;
+            default = 0;
+            example = 5;
+            description = ''
+              For each additional beegfs configuration shift all
+              service TCP/UDP ports by at least 5.
+            '';
+          };
+
+          client = {
+            enable = mkEnableOption "BeeGFS client";
+
+            mount = mkOption {
+              type = types.bool;
+              default = true;
+              description = "Create fstab entry automatically";
+            };
+
+            mountPoint = mkOption {
+              type = types.str;
+              default = "/run/beegfs";
+              description = ''
+                Mount point under which the BeeGFS filesystem should be mounted.
+                If mounted manually, the mount option specifying the config file is needed:
+                cfgFile=/etc/beegfs/client-&lt;name&gt;.conf
+              '';
+            };
+
+            extraConfig = mkOption {
+              type = types.lines;
+              default = "";
+              description = ''
+                Additional lines for beegfs-client.conf.
+                See documentation for further details.
+             '';
+            };
+          };
+
+          helperd = {
+            extraConfig = mkOption {
+              type = types.lines;
+              default = "";
+              description = ''
+                Additional lines for beegfs-helperd.conf. See documentation
+                for further details.
+              '';
+            };
+          };
+
+          mgmtd = {
+            enable = mkEnableOption "BeeGFS mgmtd daemon";
+
+            storeDir = mkOption {
+              type = types.path;
+              default = null;
+              example = "/data/beegfs-mgmtd";
+              description = ''
+                Data directory for mgmtd.
+                Must not be shared with other beegfs daemons.
+                This directory must exist and it must be initialized
+                with beegfs-setup-mgmtd, e.g. "beegfs-setup-mgmtd -C -p &lt;storeDir&gt;"
+              '';
+            };
+
+            extraConfig = mkOption {
+              type = types.lines;
+              default = "";
+              description = ''
+                Additional lines for beegfs-mgmtd.conf. See documentation
+                for further details.
+              '';
+            };
+          };
+
+          admon = {
+            enable = mkEnableOption "BeeGFS admon daemon";
+
+            extraConfig = mkOption {
+              type = types.lines;
+              default = "";
+              description = ''
+                Additional lines for beegfs-admon.conf. See documentation
+                for further details.
+              '';
+            };
+          };
+
+          meta = {
+            enable = mkEnableOption "BeeGFS meta data daemon";
+
+            storeDir = mkOption {
+              type = types.path;
+              default = null;
+              example = "/data/beegfs-meta";
+              description = ''
+                Data directory for meta data service.
+                Must not be shared with other beegfs daemons.
+                The underlying filesystem must be mounted with xattr turned on.
+                This directory must exist and it must be initialized
+                with beegfs-setup-meta, e.g.
+                "beegfs-setup-meta -C -s &lt;serviceID&gt; -p &lt;storeDir&gt;"
+              '';
+            };
+
+            extraConfig = mkOption {
+              type = types.str;
+              default = "";
+              description = ''
+                Additional lines for beegfs-meta.conf. See documentation
+                for further details.
+              '';
+            };
+          };
+
+          storage = {
+            enable = mkEnableOption "BeeGFS storage daemon";
+
+            storeDir = mkOption {
+              type = types.path;
+              default = null;
+              example = "/data/beegfs-storage";
+              description = ''
+                Data directory for the storage service.
+                Must not be shared with other beegfs daemons.
+                The underlying filesystem must be mounted with xattr turned on.
+                This directory must exist and it must be initialized
+                with beegfs-setup-storage, e.g.
+                "beegfs-setup-storage -C -s &lt;serviceID&gt; -i &lt;storageTargetID&gt; -p &lt;storeDir&gt;"
+              '';
+            };
+
+            extraConfig = mkOption {
+              type = types.str;
+              default = "";
+              description = ''
+                Additional lines for beegfs-storage.conf. See documentation
+                for further details.
+              '';
+            };
+          };
+        };
+      }));
+    };
+  };
+
+  ###### implementation
+
+  config =
+    mkIf config.services.beegfsEnable {
+
+    environment.systemPackages = utilWrappers;
+
+    # Put the client.conf files in /etc since they are needed
+    # by the commandline tools
+    environment.etc = mapAttrs' ( name: cfg:
+      (nameValuePair "beegfs/client-${name}.conf" (mkIf (cfg.client.enable)
+    {
+      enable = true;
+      text = configClient name cfg;
+    }))) cfg;
+
+    # Kernel module, we need it only once per host.
+    boot = mkIf (
+      foldr (a: b: a || b) false
+        (map (x: x.client.enable) (collect (x: x ? client) cfg)))
+    {
+      kernelModules = [ "beegfs" ];
+      extraModulePackages = [ pkgs.linuxPackages.beegfs-module ];
+    };
+
+    # generate fstab entries
+    fileSystems = mapAttrs' (name: cfg:
+      (nameValuePair cfg.client.mountPoint (optionalAttrs cfg.client.mount (mkIf cfg.client.enable {
+      device = "beegfs_nodev";
+      fsType = "beegfs";
+      mountPoint = cfg.client.mountPoint;
+      options = [ "cfgFile=${configClientFilename name}" "_netdev" ];
+    })))) cfg;
+
+    # generate systemd services
+    systemd.services = systemdHelperd //
+      foldr (a: b: a // b) {}
+        (map (x: systemdEntry x.service x.cfgFile) serviceList);
+  };
+}
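
A minimal client-only sketch of the new module, using the option defaults and examples shown above ("fs1" is an arbitrary instance name; "master" and the key path are placeholders):

    services.beegfsEnable = true;
    services.beegfs.fs1 = {
      mgmtdHost = "master";            # placeholder management host
      connAuthFile = "/etc/my.key";    # placeholder shared secret file
      client.enable = true;            # mounts at the default /run/beegfs via the generated fstab entry
    };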
diff --git a/nixos/modules/services/network-filesystems/ceph.nix b/nixos/modules/services/network-filesystems/ceph.nix
new file mode 100644
index 000000000000..5de8ae79a246
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/ceph.nix
@@ -0,0 +1,371 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  ceph = pkgs.ceph;
+  cfg  = config.services.ceph;
+  # function that translates "camelCaseOptions" to "camel case options", credits to tilpner in #nixos@freenode
+  translateOption = replaceStrings upperChars (map (s: " ${s}") lowerChars);
+  generateDaemonList = (daemonType: daemons: extraServiceConfig:
+    mkMerge (
+      map (daemon: 
+        { "ceph-${daemonType}-${daemon}" = generateServiceFile daemonType daemon cfg.global.clusterName ceph extraServiceConfig; }
+      ) daemons
+    )
+  );
+  generateServiceFile = (daemonType: daemonId: clusterName: ceph: extraServiceConfig: {
+    enable = true;
+    description = "Ceph ${builtins.replaceStrings lowerChars upperChars daemonType} daemon ${daemonId}";
+    after = [ "network-online.target" "local-fs.target" "time-sync.target" ] ++ optional (daemonType == "osd") "ceph-mon.target";
+    wants = [ "network-online.target" "local-fs.target" "time-sync.target" ];
+    partOf = [ "ceph-${daemonType}.target" ];
+    wantedBy = [ "ceph-${daemonType}.target" ];
+
+    serviceConfig = {
+      LimitNOFILE = 1048576;
+      LimitNPROC = 1048576;
+      Environment = "CLUSTER=${clusterName}";
+      ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      PrivateDevices = "yes";
+      PrivateTmp = "true";
+      ProtectHome = "true";
+      ProtectSystem = "full";
+      Restart = "on-failure";
+      StartLimitBurst = "5";
+      StartLimitInterval = "30min";
+      ExecStart = "${ceph.out}/bin/${if daemonType == "rgw" then "radosgw" else "ceph-${daemonType}"} -f --cluster ${clusterName} --id ${if daemonType == "rgw" then "client.${daemonId}" else daemonId} --setuser ceph --setgroup ceph";
+    } // extraServiceConfig
+      // optionalAttrs (daemonType == "osd") { ExecStartPre = "${ceph.out}/libexec/ceph/ceph-osd-prestart.sh --id ${daemonId} --cluster ${clusterName}"; };
+    } // optionalAttrs (builtins.elem daemonType [ "mds" "mon" "rgw" "mgr" ]) { preStart = ''
+        daemonPath="/var/lib/ceph/${if daemonType == "rgw" then "radosgw" else daemonType}/${clusterName}-${daemonId}"
+        if [ ! -d ''$daemonPath ]; then
+          mkdir -m 755 -p ''$daemonPath
+          chown -R ceph:ceph ''$daemonPath 
+        fi
+      '';
+    } // optionalAttrs (daemonType == "osd") { path = [ pkgs.getopt ]; }
+  );
+  generateTargetFile = (daemonType:
+    {
+      "ceph-${daemonType}" = {
+        description = "Ceph target allowing to start/stop all ceph-${daemonType} services at once";
+        partOf = [ "ceph.target" ];
+        before = [ "ceph.target" ];
+      };
+    }
+  );
+in 
+{
+  options.services.ceph = {
+    # Ceph has a monolithic configuration file but different sections for
+    # each daemon, a separate client section and a global section
+    enable = mkEnableOption "Ceph global configuration";
+
+    global = {
+      fsid = mkOption {
+        type = types.str;
+        example = ''
+          433a2193-4f8a-47a0-95d2-209d7ca2cca5
+        '';
+        description = ''
+          Filesystem ID, a generated uuid. It must be generated and set before
+          attempting to start a cluster.
+        '';
+      };
+
+      clusterName = mkOption {
+        type = types.str;
+        default = "ceph";
+        description = ''
+          Name of cluster
+        '';
+      };
+
+      monInitialMembers = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          node0, node1, node2 
+        '';
+        description = ''
+          List of hosts that will be used as monitors at startup.
+        '';
+      };
+
+      monHost = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          10.10.0.1, 10.10.0.2, 10.10.0.3
+        '';
+        description = ''
+          List of hostname shortnames/IP addresses of the initial monitors.
+        '';
+      };
+
+      maxOpenFiles = mkOption {
+        type = types.int;
+        default = 131072;
+        description = ''
+          Max open files for each OSD daemon.
+        '';
+      };
+
+      authClusterRequired = mkOption {
+        type = types.enum [ "cephx" "none" ];
+        default = "cephx";
+        description = ''
+          Enables requiring daemons to authenticate with each other in the cluster.
+        '';
+      };
+
+      authServiceRequired = mkOption {
+        type = types.enum [ "cephx" "none" ];
+        default = "cephx";
+        description = ''
+          Enables requiring clients to authenticate with the cluster to access services in the cluster (e.g. radosgw, mds or osd).
+        '';
+      };
+
+      authClientRequired = mkOption {
+        type = types.enum [ "cephx" "none" ];
+        default = "cephx";
+        description = ''
+          Enables requiring the cluster to authenticate itself to the client.
+        '';
+      };
+
+      publicNetwork = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          10.20.0.0/24, 192.168.1.0/24
+        '';
+        description = ''
+          A comma-separated list of subnets that will be used as public networks in the cluster.
+        '';
+      };
+
+      clusterNetwork = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          10.10.0.0/24, 192.168.0.0/24
+        '';
+        description = ''
+          A comma-separated list of subnets that will be used as cluster networks in the cluster.
+        '';
+      };
+    };
+
+    mgr = {
+      enable = mkEnableOption "Ceph MGR daemon";
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = ''
+          [ "name1" "name2" ];
+        '';
+        description = ''
+          A list of names for manager daemons that should have a service created. The names correspond
+          to the id part in ceph i.e. [ "name1" ] would result in mgr.name1
+        '';
+      };
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = ''
+          Extra configuration to add to the global section for manager daemons.
+        '';
+      };
+    };
+
+    mon = {
+      enable = mkEnableOption "Ceph MON daemon";
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = ''
+          [ "name1" "name2" ];
+        '';
+        description = ''
+          A list of monitor daemons that should have a service created. The names correspond
+          to the id part in ceph i.e. [ "name1" ] would result in mon.name1
+        '';
+      };
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = ''
+          Extra configuration to add to the monitor section.
+        '';
+      };
+    };
+
+    osd = {
+      enable = mkEnableOption "Ceph OSD daemon";
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = ''
+          [ "name1" "name2" ];
+        '';
+        description = ''
+          A list of OSD daemons that should have a service created. The names correspond
+          to the id part in ceph i.e. [ "name1" ] would result in osd.name1
+        '';
+      };
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {
+          "osd journal size" = "10000";
+          "osd pool default size" = "3";
+          "osd pool default min size" = "2";
+          "osd pool default pg num" = "200";
+          "osd pool default pgp num" = "200";
+          "osd crush chooseleaf type" = "1";
+        };
+        description = ''
+          Extra configuration to add to the OSD section.
+        '';
+      };
+    };
+
+    mds = {
+      enable = mkEnableOption "Ceph MDS daemon";
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = ''
+          [ "name1" "name2" ];
+        '';
+        description = ''
+          A list of metadata service daemons that should have a service created. The names correspond
+          to the id part in ceph i.e. [ "name1" ] would result in mds.name1
+        '';
+      };
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = ''
+          Extra configuration to add to the MDS section.
+        '';
+      };
+    };
+
+    rgw = {
+      enable = mkEnableOption "Ceph RadosGW daemon";
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = ''
+          [ "name1" "name2" ];
+        '';
+        description = ''
+          A list of rados gateway daemons that should have a service created. The names correspond
+          to the id part in ceph i.e. [ "name1" ] would result in client.name1. Radosgw daemons
+          aren't cluster daemons in the sense that OSD, MGR or MON daemons are; they are simply
+          Ceph daemons that use the cluster as a backend.
+        '';
+      };
+    };
+
+    client = {
+      enable = mkEnableOption "Ceph client configuration";
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        example = ''
+          {
+            # This would create a section for a radosgw daemon named node0 and related
+            # configuration for it
+            "client.radosgw.node0" = { "some config option" = "true"; };
+          };
+        '';
+        description = ''
+          Extra configuration to add to the client section. Configuration for rados gateways
+          would be added here, with their own sections, see example.
+        '';
+      };
+    };
+  };
+
+  config = mkIf config.services.ceph.enable {
+    assertions = [
+      { assertion = cfg.global.fsid != "";
+        message = "fsid has to be set to a valid uuid for the cluster to function";
+      }
+      { assertion = cfg.mgr.enable == true;
+        message = "ceph 12.x requires atleast 1 MGR daemon enabled for the cluster to function";
+      }
+      { assertion = cfg.mon.enable == true -> cfg.mon.daemons != [];
+        message = "have to set id of atleast one MON if you're going to enable Monitor";
+      }
+      { assertion = cfg.mds.enable == true -> cfg.mds.daemons != [];
+        message = "have to set id of atleast one MDS if you're going to enable Metadata Service";
+      }
+      { assertion = cfg.osd.enable == true -> cfg.osd.daemons != [];
+        message = "have to set id of atleast one OSD if you're going to enable OSD";
+      }
+      { assertion = cfg.mgr.enable == true -> cfg.mgr.daemons != [];
+        message = "have to set id of atleast one MGR if you're going to enable MGR";
+      }
+    ];
+
+    warnings = optional (cfg.global.monInitialMembers == null) 
+      ''Not setting a list of members in monInitialMembers requires that you set the host option for each mon daemon yourself, or else the cluster won't function'';
+    
+    environment.etc."ceph/ceph.conf".text = let
+      # Translate camelCaseOptions to the expected camel case option for ceph.conf
+      translatedGlobalConfig = mapAttrs' (name: value: nameValuePair (translateOption name) value) cfg.global;
+      # Merge the extraConfig set for mgr daemons, as mgr don't have their own section
+      globalAndMgrConfig = translatedGlobalConfig // optionalAttrs cfg.mgr.enable cfg.mgr.extraConfig;
+      # Remove all name-value pairs with null values from the attribute set to avoid making empty sections in the ceph.conf
+      globalConfig = mapAttrs' (name: value: nameValuePair (translateOption name) value) (filterAttrs (name: value: value != null) globalAndMgrConfig);
+      totalConfig = {
+          "global" = globalConfig;
+        } // optionalAttrs (cfg.mon.enable && cfg.mon.extraConfig != {}) { "mon" = cfg.mon.extraConfig; }
+          // optionalAttrs (cfg.mds.enable && cfg.mds.extraConfig != {}) { "mds" = cfg.mds.extraConfig; }
+          // optionalAttrs (cfg.osd.enable && cfg.osd.extraConfig != {}) { "osd" = cfg.osd.extraConfig; }
+          // optionalAttrs (cfg.client.enable && cfg.client.extraConfig != {})  cfg.client.extraConfig;
+      in
+        generators.toINI {} totalConfig;
+
+    users.extraUsers = singleton {
+      name = "ceph";
+      uid = config.ids.uids.ceph;
+      description = "Ceph daemon user";
+    };
+
+    users.extraGroups = singleton {
+      name = "ceph";
+      gid = config.ids.gids.ceph;
+    };
+
+    systemd.services = let
+      services = [] 
+        ++ optional cfg.mon.enable (generateDaemonList "mon" cfg.mon.daemons { RestartSec = "10"; }) 
+        ++ optional cfg.mds.enable (generateDaemonList "mds" cfg.mds.daemons { StartLimitBurst = "3"; })
+        ++ optional cfg.osd.enable (generateDaemonList "osd" cfg.osd.daemons { StartLimitBurst = "30"; RestartSec = "20s"; })
+        ++ optional cfg.rgw.enable (generateDaemonList "rgw" cfg.rgw.daemons { })
+        ++ optional cfg.mgr.enable (generateDaemonList "mgr" cfg.mgr.daemons { StartLimitBurst = "3"; });
+      in 
+        mkMerge services;
+
+    systemd.targets = let
+      targets = [
+        { "ceph" = { description = "Ceph target allowing to start/stop all ceph service instances at once"; }; }
+      ] ++ optional cfg.mon.enable (generateTargetFile "mon")
+        ++ optional cfg.mds.enable (generateTargetFile "mds")
+        ++ optional cfg.osd.enable (generateTargetFile "osd")
+        ++ optional cfg.rgw.enable (generateTargetFile "rgw")
+        ++ optional cfg.mgr.enable (generateTargetFile "mgr");
+      in
+        mkMerge targets;
+
+    systemd.tmpfiles.rules = [
+      "d /run/ceph 0770 ceph ceph -"
+    ];
+  };
+}
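
An illustrative single-node sketch of the new module, sized to satisfy the assertions above (the fsid reuses the example uuid; host and daemon names are placeholders):

    services.ceph = {
      enable = true;
      global = {
        fsid = "433a2193-4f8a-47a0-95d2-209d7ca2cca5";
        monInitialMembers = "node0";
        monHost = "10.10.0.1";
      };
      mon = { enable = true; daemons = [ "node0" ]; };
      mgr = { enable = true; daemons = [ "node0" ]; };   # at least one MGR is required by the assertions
      osd = { enable = true; daemons = [ "0" "1" ]; };
    };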
diff --git a/nixos/modules/services/network-filesystems/davfs2.nix b/nixos/modules/services/network-filesystems/davfs2.nix
new file mode 100644
index 000000000000..6b2a770100c5
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/davfs2.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.davfs2;
+  cfgFile = pkgs.writeText "davfs2.conf" ''
+    dav_user ${cfg.davUser}
+    dav_group ${cfg.davGroup}
+    ${cfg.extraConfig}
+  '';
+in
+{
+  options.services.davfs2 = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to enable davfs2.
+      '';
+    };
+
+    davUser = mkOption {
+      type = types.string;
+      default = "davfs2";
+      description = ''
+        When invoked by root the mount.davfs daemon will run as this user.
+        Value must be given as name, not as numerical id.
+      '';
+    };
+
+    davGroup = mkOption {
+      type = types.string;
+      default = "davfs2";
+      description = ''
+        The group of the running mount.davfs daemon. Ordinary users must be
+        member of this group in order to mount a davfs2 file system. Value must
+        be given as name, not as numerical id.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        kernel_fs coda
+        proxy foo.bar:8080
+        use_locks 0
+      '';
+      description = ''
+        Extra lines appended to the configuration of davfs2.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.davfs2 ];
+    environment.etc."davfs2/davfs2.conf".source = cfgFile;
+
+    users.extraGroups = optionalAttrs (cfg.davGroup == "davfs2") (singleton {
+      name = "davfs2";
+      gid = config.ids.gids.davfs2;
+    });
+
+    users.extraUsers = optionalAttrs (cfg.davUser == "davfs2") (singleton {
+      name = "davfs2";
+      createHome = false;
+      group = cfg.davGroup;
+      uid = config.ids.uids.davfs2;
+      description = "davfs2 user";
+    });
+  };
+
+}
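
A minimal sketch of the new davfs2 module, reusing the extraConfig example from the option declaration (the proxy address is a placeholder):

    services.davfs2 = {
      enable = true;
      extraConfig = ''
        proxy foo.bar:8080
        use_locks 0
      '';
    };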
diff --git a/nixos/modules/services/network-filesystems/ipfs.nix b/nixos/modules/services/network-filesystems/ipfs.nix
index d4a695ef5880..39a4fd6beff8 100644
--- a/nixos/modules/services/network-filesystems/ipfs.nix
+++ b/nixos/modules/services/network-filesystems/ipfs.nix
@@ -39,8 +39,6 @@ let
     # NB: migration must be performed prior to pre-start, else we get the failure message!
     preStart = ''
       ipfs repo fsck # workaround for BUG #4212 (https://github.com/ipfs/go-ipfs/issues/4214)
-      ipfs --local config Addresses.API ${cfg.apiAddress}
-      ipfs --local config Addresses.Gateway ${cfg.gatewayAddress}
     '' + optionalString cfg.autoMount ''
       ipfs --local config Mounts.FuseAllowOther --json true
       ipfs --local config Mounts.IPFS ${cfg.ipfsMountDir}
@@ -56,7 +54,11 @@ let
               EOF
               ipfs --local config --json "${concatStringsSep "." path}" "$value"
             '')
-            cfg.extraConfig)
+            ({ Addresses.API = cfg.apiAddress;
+               Addresses.Gateway = cfg.gatewayAddress;
+               Addresses.Swarm = cfg.swarmAddress;
+            } //
+            cfg.extraConfig))
         );
     serviceConfig = {
       ExecStart = "${wrapped}/bin/ipfs daemon ${ipfsFlags}";
@@ -140,6 +142,12 @@ in {
         description = "Where IPFS exposes its API to";
       };
 
+      swarmAddress = mkOption {
+        type = types.listOf types.str;
+        default = [ "/ip4/0.0.0.0/tcp/4001" "/ip6/::/tcp/4001" ];
+        description = "Where IPFS listens for incoming p2p connections";
+      };
+
       enableGC = mkOption {
         type = types.bool;
         default = false;
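
A brief sketch of the new swarmAddress option added above; the multiaddrs simply override the defaults shown in the option, assuming the module's existing enable option:

    services.ipfs = {
      enable = true;
      swarmAddress = [ "/ip4/0.0.0.0/tcp/4001" "/ip6/::/tcp/4001" ];
    };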
diff --git a/nixos/modules/services/network-filesystems/openafs-client/default.nix b/nixos/modules/services/network-filesystems/openafs-client/default.nix
deleted file mode 100644
index 0946e379e796..000000000000
--- a/nixos/modules/services/network-filesystems/openafs-client/default.nix
+++ /dev/null
@@ -1,99 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-let
-  inherit (lib) mkOption mkIf;
-
-  cfg = config.services.openafsClient;
-
-  cellServDB = pkgs.fetchurl {
-    url = http://dl.central.org/dl/cellservdb/CellServDB.2017-03-14;
-    sha256 = "1197z6c5xrijgf66rhaymnm5cvyg2yiy1i20y4ah4mrzmjx0m7sc";
-  };
-
-  afsConfig = pkgs.runCommand "afsconfig" {} ''
-    mkdir -p $out
-    echo ${cfg.cellName} > $out/ThisCell
-    cp ${cellServDB} $out/CellServDB
-    echo "/afs:${cfg.cacheDirectory}:${cfg.cacheSize}" > $out/cacheinfo
-  '';
-
-  openafsPkgs = config.boot.kernelPackages.openafsClient;
-in
-{
-  ###### interface
-
-  options = {
-
-    services.openafsClient = {
-
-      enable = mkOption {
-        default = false;
-        description = "Whether to enable the OpenAFS client.";
-      };
-
-      cellName = mkOption {
-        default = "grand.central.org";
-        description = "Cell name.";
-      };
-
-      cacheSize = mkOption {
-        default = "100000";
-        description = "Cache size.";
-      };
-
-      cacheDirectory = mkOption {
-        default = "/var/cache/openafs";
-        description = "Cache directory.";
-      };
-
-      crypt = mkOption {
-        default = false;
-        description = "Whether to enable (weak) protocol encryption.";
-      };
-
-      sparse = mkOption {
-        default = false;
-        description = "Minimal cell list in /afs.";
-      };
-
-    };
-  };
-
-
-  ###### implementation
-
-  config = mkIf cfg.enable {
-
-    environment.systemPackages = [ openafsPkgs ];
-
-    environment.etc = [
-      { source = afsConfig;
-        target = "openafs";
-      }
-    ];
-
-    systemd.services.afsd = {
-      description = "AFS client";
-      wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" ];
-      serviceConfig = { RemainAfterExit = true; };
-
-      preStart = ''
-        mkdir -p -m 0755 /afs
-        mkdir -m 0700 -p ${cfg.cacheDirectory}
-        ${pkgs.kmod}/bin/insmod ${openafsPkgs}/lib/openafs/libafs-*.ko || true
-        ${openafsPkgs}/sbin/afsd -confdir ${afsConfig} -cachedir ${cfg.cacheDirectory} ${if cfg.sparse then "-dynroot-sparse" else "-dynroot"} -fakestat -afsdb
-        ${openafsPkgs}/bin/fs setcrypt ${if cfg.crypt then "on" else "off"}
-      '';
-
-      # Doing this in preStop, because after these commands AFS is basically
-      # stopped, so systemd has nothing to do, just noticing it.  If done in
-      # postStop, then we get a hang + kernel oops, because AFS can't be
-      # stopped simply by sending signals to processes.
-      preStop = ''
-        ${pkgs.utillinux}/bin/umount /afs
-        ${openafsPkgs}/sbin/afsd -shutdown
-      '';
-    };
-  };
-}
diff --git a/nixos/modules/services/network-filesystems/openafs/client.nix b/nixos/modules/services/network-filesystems/openafs/client.nix
new file mode 100644
index 000000000000..3826fe3edfd0
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/openafs/client.nix
@@ -0,0 +1,239 @@
+{ config, pkgs, lib, ... }:
+
+with import ./lib.nix { inherit lib; };
+
+let
+  inherit (lib) getBin mkOption mkIf optionalString singleton types;
+
+  cfg = config.services.openafsClient;
+
+  cellServDB = pkgs.fetchurl {
+    url = http://dl.central.org/dl/cellservdb/CellServDB.2017-03-14;
+    sha256 = "1197z6c5xrijgf66rhaymnm5cvyg2yiy1i20y4ah4mrzmjx0m7sc";
+  };
+
+  clientServDB = pkgs.writeText "client-cellServDB-${cfg.cellName}" (mkCellServDB cfg.cellName cfg.cellServDB);
+
+  afsConfig = pkgs.runCommand "afsconfig" {} ''
+    mkdir -p $out
+    echo ${cfg.cellName} > $out/ThisCell
+    cat ${cellServDB} ${clientServDB} > $out/CellServDB
+    echo "${cfg.mountPoint}:${cfg.cache.directory}:${toString cfg.cache.blocks}" > $out/cacheinfo
+  '';
+
+  openafsMod = config.boot.kernelPackages.openafs;
+  openafsBin = lib.getBin pkgs.openafs;
+in
+{
+  ###### interface
+
+  options = {
+
+    services.openafsClient = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = "Whether to enable the OpenAFS client.";
+      };
+
+      afsdb = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Resolve cells via AFSDB DNS records.";
+      };
+
+      cellName = mkOption {
+        default = "";
+        type = types.str;
+        description = "Cell name.";
+        example = "grand.central.org";
+      };
+
+      cellServDB = mkOption {
+        default = [];
+        type = with types; listOf (submodule { options = cellServDBConfig; });
+        description = ''
+          This cell's database server records, added to the global
+          CellServDB. See CellServDB(5) man page for syntax. Ignored when
+          <literal>afsdb</literal> is set to <literal>true</literal>.
+        '';
+        example = ''
+          [ { ip = "1.2.3.4"; dnsname = "first.afsdb.server.dns.fqdn.org"; }
+            { ip = "2.3.4.5"; dnsname = "second.afsdb.server.dns.fqdn.org"; }
+          ]
+        '';
+      };
+
+      cache = {
+        blocks = mkOption {
+          default = 100000;
+          type = types.int;
+          description = "Cache size in 1KB blocks.";
+        };
+
+        chunksize = mkOption {
+          default = 0;
+          type = types.ints.between 0 30;
+          description = ''
+            Size of each cache chunk given in powers of
+            2. <literal>0</literal> resets the chunk size to its default
+            values (13 (8 KB) for memcache, 18-20 (256 KB to 1 MB) for
+            diskcache). Maximum value is 30. Important performance
+            parameter. Set to higher values when dealing with large files.
+          '';
+        };
+
+        directory = mkOption {
+          default = "/var/cache/openafs";
+          type = types.str;
+          description = "Cache directory.";
+        };
+
+        diskless = mkOption {
+          default = false;
+          type = types.bool;
+          description = ''
+            Use in-memory cache for diskless machines. Has no real
+            performance benefit anymore.
+          '';
+        };
+      };
+
+      crypt = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Whether to enable (weak) protocol encryption.";
+      };
+
+      daemons = mkOption {
+        default = 2;
+        type = types.int;
+        description = ''
+          Number of daemons to serve user requests. Numbers higher than 6
+          usually do not increase performance. The default is sufficient for up
+          to five concurrent users.
+        '';
+      };
+
+      fakestat = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Return fake data on stat() calls. If <literal>true</literal>,
+          always do so. If <literal>false</literal>, only do so for
+          cross-cell mounts (as these are potentially expensive).
+        '';
+      };
+
+      inumcalc = mkOption {
+        default = "compat";
+        type = types.strMatching "compat|md5";
+        description = ''
+          Inode calculation method. <literal>compat</literal> is
+          computationally less expensive, but <literal>md5</literal> greatly
+          reduces the likelihood of inode collisions in larger scenarios
+          involving multiple cells mounted into one AFS space.
+        '';
+      };
+
+      mountPoint = mkOption {
+        default = "/afs";
+        type = types.str;
+        description = ''
+          Mountpoint of the AFS file tree, conventionally
+          <literal>/afs</literal>. When set to a different value, only
+          cross-cells that use the same value can be accessed.
+        '';
+      };
+
+      sparse = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Minimal cell list in /afs.";
+      };
+
+      startDisconnected = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Start up in disconnected mode.  You need to execute
+          <literal>fs disco online</literal> (as root) to switch to
+          connected mode. Useful for roaming devices.
+        '';
+      };
+
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.afsdb || cfg.cellServDB != [];
+        message = "You should specify all cell-local database servers in config.services.openafsClient.cellServDB or set config.services.openafsClient.afsdb.";
+      }
+      { assertion = cfg.cellName != "";
+        message = "You must specify the local cell name in config.services.openafsClient.cellName.";
+      }
+    ];
+
+    environment.systemPackages = [ pkgs.openafs ];
+
+    environment.etc = {
+      clientCellServDB = {
+        source = pkgs.runCommand "CellServDB" {} ''
+          cat ${cellServDB} ${clientServDB} > $out
+        '';
+        target = "openafs/CellServDB";
+        mode = "0644";
+      };
+      clientCell = {
+        text = ''
+          ${cfg.cellName}
+        '';
+        target = "openafs/ThisCell";
+        mode = "0644";
+      };
+    };
+
+    systemd.services.afsd = {
+      description = "AFS client";
+      wantedBy = [ "multi-user.target" ];
+      after = singleton (if cfg.startDisconnected then "network.target" else "network-online.target");
+      serviceConfig = { RemainAfterExit = true; };
+      restartIfChanged = false;
+
+      preStart = ''
+        mkdir -p -m 0755 ${cfg.mountPoint}
+        mkdir -m 0700 -p ${cfg.cache.directory}
+        ${pkgs.kmod}/bin/insmod ${openafsMod}/lib/modules/*/extra/openafs/libafs.ko.xz
+        ${openafsBin}/sbin/afsd \
+          -mountdir ${cfg.mountPoint} \
+          -confdir ${afsConfig} \
+          ${optionalString (!cfg.cache.diskless) "-cachedir ${cfg.cache.directory}"} \
+          -blocks ${toString cfg.cache.blocks} \
+          -chunksize ${toString cfg.cache.chunksize} \
+          ${optionalString cfg.cache.diskless "-memcache"} \
+          -inumcalc ${cfg.inumcalc} \
+          ${if cfg.fakestat then "-fakestat-all" else "-fakestat"} \
+          ${if cfg.sparse then "-dynroot-sparse" else "-dynroot"} \
+          ${optionalString cfg.afsdb "-afsdb"}
+        ${openafsBin}/bin/fs setcrypt ${if cfg.crypt then "on" else "off"}
+        ${optionalString cfg.startDisconnected "${openafsBin}/bin/fs discon offline"}
+      '';
+
+      # Doing this in preStop, because after these commands AFS is basically
+      # stopped, so systemd has nothing to do, just noticing it.  If done in
+      # postStop, then we get a hang + kernel oops, because AFS can't be
+      # stopped simply by sending signals to processes.
+      preStop = ''
+        ${pkgs.utillinux}/bin/umount ${cfg.mountPoint}
+        ${openafsBin}/sbin/afsd -shutdown
+        ${pkgs.kmod}/sbin/rmmod libafs
+      '';
+    };
+  };
+}
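
A minimal configuration sketch exercising the options declared above; the cell name is the example from the option documentation, and the cache size is an arbitrary illustration:

    # Hypothetical client setup relying on AFSDB DNS records for the cell.
    services.openafsClient = {
      enable = true;
      cellName = "grand.central.org";   # example cell from the option documentation
      afsdb = true;                     # resolve database servers via DNS (module default)
      cache.blocks = 500000;            # ~500 MB disk cache, illustrative value
      fakestat = true;                  # pass -fakestat-all to afsd
    };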
diff --git a/nixos/modules/services/network-filesystems/openafs/lib.nix b/nixos/modules/services/network-filesystems/openafs/lib.nix
new file mode 100644
index 000000000000..ecfc72d2eaf9
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/openafs/lib.nix
@@ -0,0 +1,28 @@
+{ lib, ...}:
+
+let
+  inherit (lib) concatStringsSep mkOption types;
+
+in rec {
+
+  mkCellServDB = cellName: db: ''
+    >${cellName}
+  '' + (concatStringsSep "\n" (map (dbm: if (dbm.ip != "" && dbm.dnsname != "") then dbm.ip + " #" + dbm.dnsname else "")
+                                   db));
+
+  # CellServDB configuration type
+  cellServDBConfig = {
+    ip = mkOption {
+      type = types.str;
+      default = "";
+      example = "1.2.3.4";
+      description = "IP Address of a database server";
+    };
+    dnsname = mkOption {
+      type = types.str;
+      default = "";
+      example = "afs.example.org";
+      description = "DNS fully-qualified domain name of a database server";
+    };
+  };
+}
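
For reference, mkCellServDB prepends the cell marker and renders one "ip #dnsname" line per entry; for a hypothetical two-server cell it evaluates to roughly:

    # Input (hypothetical):
    mkCellServDB "example.org" [
      { ip = "192.0.2.10"; dnsname = "db1.example.org"; }
      { ip = "192.0.2.11"; dnsname = "db2.example.org"; }
    ]
    # Result:
    #   >example.org
    #   192.0.2.10 #db1.example.org
    #   192.0.2.11 #db2.example.org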
diff --git a/nixos/modules/services/network-filesystems/openafs/server.nix b/nixos/modules/services/network-filesystems/openafs/server.nix
new file mode 100644
index 000000000000..429eb945ac9e
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/openafs/server.nix
@@ -0,0 +1,260 @@
+{ config, pkgs, lib, ... }:
+
+with import ./lib.nix { inherit lib; };
+
+let
+  inherit (lib) concatStringsSep intersperse mapAttrsToList mkForce mkIf mkMerge mkOption optionalString types;
+
+  bosConfig = pkgs.writeText "BosConfig" (''
+    restrictmode 1
+    restarttime 16 0 0 0 0
+    checkbintime 3 0 5 0 0
+  '' + (optionalString cfg.roles.database.enable ''
+    bnode simple vlserver 1
+    parm ${openafsBin}/libexec/openafs/vlserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} ${cfg.roles.database.vlserverArgs}
+    end
+    bnode simple ptserver 1
+    parm ${openafsBin}/libexec/openafs/ptserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} ${cfg.roles.database.ptserverArgs}
+    end
+  '') + (optionalString cfg.roles.fileserver.enable ''
+    bnode dafs dafs 1
+    parm ${openafsBin}/libexec/openafs/dafileserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} -udpsize ${udpSizeStr} ${cfg.roles.fileserver.fileserverArgs}
+    parm ${openafsBin}/libexec/openafs/davolserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} -udpsize ${udpSizeStr} ${cfg.roles.fileserver.volserverArgs}
+    parm ${openafsBin}/libexec/openafs/salvageserver ${cfg.roles.fileserver.salvageserverArgs}
+    parm ${openafsBin}/libexec/openafs/dasalvager ${cfg.roles.fileserver.salvagerArgs}
+    end
+  '') + (optionalString (cfg.roles.database.enable && cfg.roles.backup.enable) ''
+    bnode simple buserver 1
+    parm ${openafsBin}/libexec/openafs/buserver ${cfg.roles.backup.buserverArgs} ${optionalString (cfg.roles.backup.cellServDB != []) "-cellservdb /etc/openafs/backup/"}
+    end
+  ''));
+
+  netInfo = if (cfg.advertisedAddresses != []) then
+    pkgs.writeText "NetInfo" ((concatStringsSep "\nf " cfg.advertisedAddresses) + "\n")
+  else null;
+
+  buCellServDB = pkgs.writeText "backup-cellServDB-${cfg.cellName}" (mkCellServDB cfg.cellName cfg.roles.backup.cellServDB);
+
+  cfg = config.services.openafsServer;
+
+  udpSizeStr = toString cfg.udpPacketSize;
+
+  openafsBin = lib.getBin pkgs.openafs;
+
+in {
+
+  options = {
+
+    services.openafsServer = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Whether to enable the OpenAFS server. An OpenAFS server needs a
+          complex setup. So, be aware that enabling this service and setting
+          some options does not give you a turn-key-ready solution. You need
+          at least a running Kerberos 5 setup, as OpenAFS relies on it for
+          authentication. See the Guide "QuickStartUnix" coming with
+          <literal>pkgs.openafs.doc</literal> for complete setup
+          instructions.
+        '';
+      };
+
+      advertisedAddresses = mkOption {
+        default = [];
+        description = "List of IP addresses this server is advertised under. See NetInfo(5)";
+      };
+
+      cellName = mkOption {
+        default = "";
+        type = types.str;
+        description = "Cell name this server will serve.";
+        example = "grand.central.org";
+      };
+
+      cellServDB = mkOption {
+        default = [];
+        type = with types; listOf (submodule [ { options = cellServDBConfig;} ]);
+        description = "Definition of all cell-local database server machines.";
+      };
+
+      roles = {
+        fileserver = {
+          enable = mkOption {
+            default = true;
+            type = types.bool;
+            description = "Fileserver role, serves files and volumes from its local storage.";
+          };
+
+          fileserverArgs = mkOption {
+            default = "-vattachpar 128 -vhashsize 11 -L -rxpck 400 -cb 1000000";
+            type = types.str;
+            description = "Arguments to the dafileserver process. See its man page.";
+          };
+
+          volserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the davolserver process. See its man page.";
+            example = "-sync never";
+          };
+
+          salvageserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the salvageserver process. See its man page.";
+            example = "-showlog";
+          };
+
+          salvagerArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the dasalvager process. See its man page.";
+            example = "-showlog -showmounts";
+          };
+        };
+
+        database = {
+          enable = mkOption {
+            default = true;
+            type = types.bool;
+            description = ''
+              Database server role, maintains the Volume Location Database,
+              Protection Database (and Backup Database, see
+              <literal>backup</literal> role). There can be multiple
+              servers in the database role for replication, which then need
+              reliable network connection to each other.
+
+              Servers in this role appear in AFSDB DNS records or the
+              CellServDB.
+            '';
+          };
+
+          vlserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the vlserver process. See its man page.";
+            example = "-rxbind";
+          };
+
+          ptserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the ptserver process. See its man page.";
+            example = "-restricted -default_access S---- S-M---";
+          };
+        };
+
+        backup = {
+          enable = mkOption {
+            default = false;
+            type = types.bool;
+            description = ''
+              Backup server role. Use in conjunction with the
+              <literal>database</literal> role to maintain the Backup
+              Database. Normally only used in conjunction with tape storage
+              or IBM's Tivoli Storage Manager.
+            '';
+          };
+
+          buserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the buserver process. See its man page.";
+            example = "-p 8";
+          };
+
+          cellServDB = mkOption {
+            default = [];
+            type = with types; listOf (submodule [ { options = cellServDBConfig;} ]);
+            description = ''
+              Definition of all cell-local backup database server machines.
+              Use this when your cell uses fewer backup database servers
+              than other database server machines.
+            '';
+          };
+        };
+      };
+
+      dottedPrincipals = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          If enabled, allow principal names containing (.) dots. Enabling
+          this has security implications!
+        '';
+      };
+
+      udpPacketSize = mkOption {
+        default = 1310720;
+        type = types.int;
+        description = ''
+          UDP packet size to use in bytes. Higher values can speed up
+          communications. The default (1310720 bytes) is sufficient in most
+          cases. Make sure to increase the kernel's UDP buffer size
+          accordingly via the <literal>net.core.(w|r|opt)mem_max</literal>
+          sysctls.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.cellServDB != [];
+        message = "You must specify all cell-local database servers in config.services.openafsServer.cellServDB.";
+      }
+      { assertion = cfg.cellName != "";
+        message = "You must specify the local cell name in config.services.openafsServer.cellName.";
+      }
+    ];
+
+    environment.systemPackages = [ pkgs.openafs ];
+
+    environment.etc = {
+      bosConfig = {
+        source = bosConfig;
+        target = "openafs/BosConfig";
+        mode = "0644";
+      };
+      cellServDB = {
+        text = mkCellServDB cfg.cellName cfg.cellServDB;
+        target = "openafs/server/CellServDB";
+        mode = "0644";
+      };
+      thisCell = {
+        text = cfg.cellName;
+        target = "openafs/server/ThisCell";
+        mode = "0644";
+      };
+      buCellServDB = {
+        enable = (cfg.roles.backup.cellServDB != []);
+        text = mkCellServDB cfg.cellName cfg.roles.backup.cellServDB;
+        target = "openafs/backup/CellServDB";
+      };
+    };
+
+    systemd.services = {
+      openafs-server = {
+        description = "OpenAFS server";
+        after = [ "syslog.target" "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        restartIfChanged = false;
+        unitConfig.ConditionPathExists = [ "/etc/openafs/server/rxkad.keytab" ];
+        preStart = ''
+          mkdir -m 0755 -p /var/openafs
+          ${optionalString (netInfo != null) "cp ${netInfo} /var/openafs/netInfo"}
+          ${optionalString (cfg.roles.backup.cellServDB != []) "cp ${buCellServDB}"}
+        '';
+        serviceConfig = {
+          ExecStart = "${openafsBin}/bin/bosserver -nofork";
+          ExecStop = "${openafsBin}/bin/bos shutdown localhost -wait -localauth";
+        };
+      };
+    };
+  };
+}
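
A sketch of a single combined database/fileserver host using only options defined in this module; the cell name and address are placeholders, and the rxkad.keytab required by ConditionPathExists still has to be provided separately:

    # Hypothetical server configuration.
    services.openafsServer = {
      enable = true;
      cellName = "example.org";
      cellServDB = [
        { ip = "192.0.2.10"; dnsname = "afsdb1.example.org"; }
      ];
      # The fileserver and database roles are enabled by default;
      # the backup role stays off unless explicitly enabled.
    };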
diff --git a/nixos/modules/services/network-filesystems/samba.nix b/nixos/modules/services/network-filesystems/samba.nix
index 9b9c91a4f167..b23266e8d43a 100644
--- a/nixos/modules/services/network-filesystems/samba.nix
+++ b/nixos/modules/services/network-filesystems/samba.nix
@@ -54,9 +54,12 @@ let
       };
 
       serviceConfig = {
-        ExecStart = "${samba}/sbin/${appName} ${args}";
+        ExecStart = "${samba}/sbin/${appName} --foreground --no-process-group ${args}";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        LimitNOFILE = 16384;
+        PIDFile = "/run/${appName}.pid";
         Type = "notify";
+        NotifyAccess = "all"; #may not do anything...
       };
 
       restartTriggers = [ configFile ];
@@ -230,11 +233,12 @@ in
             after = [ "samba-setup.service" "network.target" ];
             wantedBy = [ "multi-user.target" ];
           };
-
+          # Refer to https://github.com/samba-team/samba/tree/master/packaging/systemd
+          # for correct use with systemd
           services = {
-            "samba-smbd" = daemonService "smbd" "-F";
-            "samba-nmbd" = mkIf cfg.enableNmbd (daemonService "nmbd" "-F");
-            "samba-winbindd" = mkIf cfg.enableWinbindd (daemonService "winbindd" "-F");
+            "samba-smbd" = daemonService "smbd" "";
+            "samba-nmbd" = mkIf cfg.enableNmbd (daemonService "nmbd" "");
+            "samba-winbindd" = mkIf cfg.enableWinbindd (daemonService "winbindd" "");
             "samba-setup" = {
               description = "Samba Setup Task";
               script = setupScript;
diff --git a/nixos/modules/services/network-filesystems/xtreemfs.nix b/nixos/modules/services/network-filesystems/xtreemfs.nix
index 0c6714563d8a..95d7641e8b53 100644
--- a/nixos/modules/services/network-filesystems/xtreemfs.nix
+++ b/nixos/modules/services/network-filesystems/xtreemfs.nix
@@ -11,7 +11,7 @@ let
   home = cfg.homeDir;
 
   startupScript = class: configPath: pkgs.writeScript "xtreemfs-osd.sh" ''
-    #! ${pkgs.stdenv.shell}
+    #! ${pkgs.runtimeShell}
     JAVA_HOME="${pkgs.jdk}"
     JAVADIR="${xtreemfs}/share/java"
     JAVA_CALL="$JAVA_HOME/bin/java -ea -cp $JAVADIR/XtreemFS.jar:$JAVADIR/BabuDB.jar:$JAVADIR/Flease.jar:$JAVADIR/protobuf-java-2.5.0.jar:$JAVADIR/Foundation.jar:$JAVADIR/jdmkrt.jar:$JAVADIR/jdmktk.jar:$JAVADIR/commons-codec-1.3.jar"
diff --git a/nixos/modules/services/network-filesystems/yandex-disk.nix b/nixos/modules/services/network-filesystems/yandex-disk.nix
index 4de206641331..44b0edf62018 100644
--- a/nixos/modules/services/network-filesystems/yandex-disk.nix
+++ b/nixos/modules/services/network-filesystems/yandex-disk.nix
@@ -99,10 +99,10 @@ in
             exit 1
         fi
 
-        ${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${u} \
+        ${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${u} \
           -c '${pkgs.yandex-disk}/bin/yandex-disk token -p ${cfg.password} ${cfg.username} ${dir}/token'
 
-        ${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${u} \
+        ${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${u} \
           -c '${pkgs.yandex-disk}/bin/yandex-disk start --no-daemon -a ${dir}/token -d ${cfg.directory} --exclude-dirs=${cfg.excludes}'
       '';
 
diff --git a/nixos/modules/services/networking/amuled.nix b/nixos/modules/services/networking/amuled.nix
index fc7d56a24fa7..9898f164c5cf 100644
--- a/nixos/modules/services/networking/amuled.nix
+++ b/nixos/modules/services/networking/amuled.nix
@@ -68,7 +68,7 @@ in
       '';
 
       script = ''
-        ${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${user} \
+        ${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${user} \
             -c 'HOME="${cfg.dataDir}" ${pkgs.amuleDaemon}/bin/amuled'
       '';
     };
diff --git a/nixos/modules/services/networking/aria2.nix b/nixos/modules/services/networking/aria2.nix
index ad4ac9bf45e3..df9c92db2e54 100644
--- a/nixos/modules/services/networking/aria2.nix
+++ b/nixos/modules/services/networking/aria2.nix
@@ -10,9 +10,9 @@ let
   settingsDir = "${homeDir}";
   sessionFile = "${homeDir}/aria2.session";
   downloadDir = "${homeDir}/Downloads";
-  
+
   rangesToStringList = map (x: builtins.toString x.from +"-"+ builtins.toString x.to);
-  
+
   settingsFile = pkgs.writeText "aria2.conf"
   ''
     dir=${cfg.downloadDir}
@@ -110,12 +110,12 @@ in
         mkdir -m 0770 -p "${homeDir}"
         chown aria2:aria2 "${homeDir}"
         if [[ ! -d "${config.services.aria2.downloadDir}" ]]
-        then 
+        then
           mkdir -m 0770 -p "${config.services.aria2.downloadDir}"
           chown aria2:aria2 "${config.services.aria2.downloadDir}"
         fi
         if [[ ! -e "${sessionFile}" ]]
-        then 
+        then
           touch "${sessionFile}"
           chown aria2:aria2 "${sessionFile}"
         fi
@@ -132,4 +132,4 @@ in
       };
     };
   };
-}
\ No newline at end of file
+}
diff --git a/nixos/modules/services/networking/bird.nix b/nixos/modules/services/networking/bird.nix
index 1a7a1e24b702..c25bd0fdc541 100644
--- a/nixos/modules/services/networking/bird.nix
+++ b/nixos/modules/services/networking/bird.nix
@@ -7,21 +7,27 @@ let
     let
       cfg = config.services.${variant};
       pkg = pkgs.${variant};
+      birdBin = if variant == "bird6" then "bird6" else "bird";
       birdc = if variant == "bird6" then "birdc6" else "birdc";
+      descr =
+        { bird = "1.6.x with IPv4 support";
+          bird6 = "1.6.x with IPv6 support";
+          bird2 = "2.x";
+        }.${variant};
       configFile = pkgs.stdenv.mkDerivation {
         name = "${variant}.conf";
         text = cfg.config;
         preferLocalBuild = true;
         buildCommand = ''
           echo -n "$text" > $out
-          ${pkg}/bin/${variant} -d -p -c $out
+          ${pkg}/bin/${birdBin} -d -p -c $out
         '';
       };
     in {
       ###### interface
       options = {
         services.${variant} = {
-          enable = mkEnableOption "BIRD Internet Routing Daemon";
+          enable = mkEnableOption "BIRD Internet Routing Daemon (${descr})";
           config = mkOption {
             type = types.lines;
             description = ''
@@ -36,12 +42,12 @@ let
       config = mkIf cfg.enable {
         environment.systemPackages = [ pkg ];
         systemd.services.${variant} = {
-          description = "BIRD Internet Routing Daemon";
+          description = "BIRD Internet Routing Daemon (${descr})";
           wantedBy = [ "multi-user.target" ];
           serviceConfig = {
             Type = "forking";
             Restart = "on-failure";
-            ExecStart = "${pkg}/bin/${variant} -c ${configFile} -u ${variant} -g ${variant}";
+            ExecStart = "${pkg}/bin/${birdBin} -c ${configFile} -u ${variant} -g ${variant}";
             ExecReload = "${pkg}/bin/${birdc} configure";
             ExecStop = "${pkg}/bin/${birdc} down";
             CapabilityBoundingSet = [ "CAP_CHOWN" "CAP_FOWNER" "CAP_DAC_OVERRIDE" "CAP_SETUID" "CAP_SETGID"
@@ -56,14 +62,15 @@ let
         users = {
           extraUsers.${variant} = {
             description = "BIRD Internet Routing Daemon user";
-            group = "${variant}";
+            group = variant;
           };
           extraGroups.${variant} = {};
         };
       };
     };
 
-  inherit (config.services) bird bird6;
-in {
-  imports = [(generic "bird") (generic "bird6")];
+in
+
+{
+  imports = map generic [ "bird" "bird6" "bird2" ];
 }
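
With the generic builder now mapped over bird, bird6 and bird2, each variant gets its own independent service and user; a sketch enabling only the 2.x daemon (the BIRD configuration body is an illustrative fragment, not a complete routing setup):

    # Hypothetical: run BIRD 2.x only.
    services.bird2 = {
      enable = true;
      config = ''
        router id 192.0.2.1;     # example router id
        protocol device { }      # scan interfaces; real setups add kernel/BGP/OSPF protocols
      '';
    };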
diff --git a/nixos/modules/services/networking/connman.nix b/nixos/modules/services/networking/connman.nix
index 546d27069232..c3ca6fbe725e 100644
--- a/nixos/modules/services/networking/connman.nix
+++ b/nixos/modules/services/networking/connman.nix
@@ -52,6 +52,15 @@ in {
         '';
       };
 
+      extraFlags = mkOption {
+        type = with types; listOf string;
+        default = [ ];
+        example = [ "--nodnsproxy" ];
+        description = ''
+          Extra flags to pass to connmand.
+        '';
+      };
+
     };
 
   };
@@ -81,7 +90,7 @@ in {
         Type = "dbus";
         BusName = "net.connman";
         Restart = "on-failure";
-        ExecStart = "${pkgs.connman}/sbin/connmand --config=${configFile} --nodaemon";
+        ExecStart = "${pkgs.connman}/sbin/connmand --config=${configFile} --nodaemon ${toString cfg.extraFlags}";
         StandardOutput = "null";
       };
     };
diff --git a/nixos/modules/services/networking/dante.nix b/nixos/modules/services/networking/dante.nix
index a9a77f3412af..32acce51e692 100644
--- a/nixos/modules/services/networking/dante.nix
+++ b/nixos/modules/services/networking/dante.nix
@@ -47,7 +47,7 @@ in
 
     systemd.services.dante = {
       description   = "Dante SOCKS v4 and v5 compatible proxy server";
-      after         = [ "network.target" ];
+      after         = [ "network-online.target" ];
       wantedBy      = [ "multi-user.target" ];
 
       serviceConfig = {
diff --git a/nixos/modules/services/networking/ddclient.nix b/nixos/modules/services/networking/ddclient.nix
index 9e56545f746c..9a2e13e9553c 100644
--- a/nixos/modules/services/networking/ddclient.nix
+++ b/nixos/modules/services/networking/ddclient.nix
@@ -3,24 +3,24 @@
 let
   cfg = config.services.ddclient;
   boolToStr = bool: if bool then "yes" else "no";
+  dataDir = "/var/lib/ddclient";
 
   configText = ''
     # This file can be used as a template for configFile or is automatically generated by Nix options.
-    daemon=${toString cfg.interval}
-    cache=${cfg.homeDir}/ddclient.cache
-    pid=/run/ddclient/ddclient.pid
-    foreground=NO
+    cache=${dataDir}/ddclient.cache
+    foreground=YES
     use=${cfg.use}
     login=${cfg.username}
     password=${cfg.password}
     protocol=${cfg.protocol}
-    ${let server = cfg.server; in
-      lib.optionalString (server != "") "server=${server}"}
+    ${lib.optionalString (cfg.script != "") "script=${cfg.script}"}
+    ${lib.optionalString (cfg.server != "") "server=${cfg.server}"}
+    ${lib.optionalString (cfg.zone != "")   "zone=${cfg.zone}"}
     ssl=${boolToStr cfg.ssl}
     wildcard=YES
     quiet=${boolToStr cfg.quiet}
     verbose=${boolToStr cfg.verbose}
-    ${cfg.domain}
+    ${lib.concatStringsSep "," cfg.domains}
     ${cfg.extraConfig}
   '';
 
@@ -44,17 +44,11 @@ with lib;
         '';
       };
 
-      homeDir = mkOption {
-        default = "/var/lib/ddclient";
-        type = str;
-        description = "Home directory for the daemon user.";
-      };
-
-      domain = mkOption {
-        default = "";
-        type = str;
+      domains = mkOption {
+        default = [ "" ];
+        type = listOf str;
         description = ''
-          Domain name to synchronize.
+          Domain name(s) to synchronize.
         '';
       };
 
@@ -62,7 +56,7 @@ with lib;
         default = "";
         type = str;
         description = ''
-          Username.
+          User name.
         '';
       };
 
@@ -75,9 +69,12 @@ with lib;
       };
 
       interval = mkOption {
-        default = 600;
-        type = int;
-        description = "The interval at which to run the check and update.";
+        default = "10min";
+        type = str;
+        description = ''
+          The interval at which to run the check and update.
+          See <command>man 7 systemd.time</command> for the format.
+        '';
       };
 
       configFile = mkOption {
@@ -95,7 +92,7 @@ with lib;
         default = "dyndns2";
         type = str;
         description = ''
-          Protocol to use with dynamic DNS provider (see http://sourceforge.net/apps/trac/ddclient/wiki/Protocols).
+          Protocol to use with dynamic DNS provider (see https://sourceforge.net/p/ddclient/wiki/protocols).
         '';
       };
 
@@ -115,11 +112,20 @@ with lib;
         '';
       };
 
-      extraConfig = mkOption {
+
+      quiet = mkOption {
+        default = false;
+        type = bool;
+        description = ''
+          Print no messages for unnecessary updates.
+        '';
+      };
+
+      script = mkOption {
         default = "";
-        type = lines;
+        type = str;
         description = ''
-          Extra configuration. Contents will be added verbatim to the configuration file.
+          Script as required by some providers.
         '';
       };
 
@@ -139,11 +145,19 @@ with lib;
         '';
       };
 
-      quiet = mkOption {
-        default = false;
-        type = bool;
+      zone = mkOption {
+        default = "";
+        type = str;
         description = ''
-          Print no messages for unnecessary updates.
+          Zone as required by some providers.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = lines;
+        description = ''
+          Extra configuration. Contents will be added verbatim to the configuration file.
         '';
       };
     };
@@ -153,23 +167,8 @@ with lib;
   ###### implementation
 
   config = mkIf config.services.ddclient.enable {
-
-    users = {
-      extraGroups.ddclient.gid = config.ids.gids.ddclient;
-
-      extraUsers.ddclient = {
-        uid = config.ids.uids.ddclient;
-        description = "ddclient daemon user";
-        group = "ddclient";
-        home = cfg.homeDir;
-        createHome = true;
-      };
-    };
-
     environment.etc."ddclient.conf" = {
       enable = cfg.configFile == "/etc/ddclient.conf";
-      uid = config.ids.uids.ddclient;
-      gid = config.ids.gids.ddclient;
       mode = "0600";
       text = configText;
     };
@@ -180,15 +179,22 @@ with lib;
       after = [ "network.target" ];
       restartTriggers = [ config.environment.etc."ddclient.conf".source ];
 
-      serviceConfig = {
-        RuntimeDirectory = "ddclient";
-        # we cannot run in forking mode as it swallows all the program output
-        Type = "simple";
-        User = "ddclient";
-        Group = "ddclient";
-        ExecStart = "${lib.getBin pkgs.ddclient}/bin/ddclient -foreground -file ${cfg.configFile}";
-        ProtectSystem = "full";
-        PrivateTmp = true;
+      serviceConfig = rec {
+        DynamicUser = true;
+        RuntimeDirectory = StateDirectory;
+        StateDirectory = builtins.baseNameOf dataDir;
+        Type = "oneshot";
+        ExecStartPre = "!${lib.getBin pkgs.coreutils}/bin/install -m666 ${cfg.configFile} /run/${RuntimeDirectory}/ddclient.conf";
+        ExecStart = "${lib.getBin pkgs.ddclient}/bin/ddclient -file /run/${RuntimeDirectory}/ddclient.conf";
+      };
+    };
+
+    systemd.timers.ddclient = {
+      description = "Run ddclient";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnBootSec = cfg.interval;
+        OnUnitInactiveSec = cfg.interval;
       };
     };
   };
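
Because the unit is now a oneshot driven by a timer, interval takes a systemd.time(7) span instead of seconds, and several hosts can be updated from one configuration. A sketch with placeholder provider data:

    # Hypothetical ddclient setup; provider, credentials and domains are examples.
    services.ddclient = {
      enable = true;
      protocol = "dyndns2";                                # module default
      domains = [ "home.example.org" "nas.example.org" ];
      username = "alice";
      password = "hunter2";                                # written verbatim into ddclient.conf
      interval = "15min";                                  # OnBootSec/OnUnitInactiveSec of the timer
    };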
diff --git a/nixos/modules/services/networking/dhcpcd.nix b/nixos/modules/services/networking/dhcpcd.nix
index d283c7624335..de0aa1a2c2c3 100644
--- a/nixos/modules/services/networking/dhcpcd.nix
+++ b/nixos/modules/services/networking/dhcpcd.nix
@@ -16,7 +16,7 @@ let
   # Don't start dhcpcd on explicitly configured interfaces or on
   # interfaces that are part of a bridge, bond or sit device.
   ignoredInterfaces =
-    map (i: i.name) (filter (i: if i.useDHCP != null then !i.useDHCP else i.ip4 != [ ] || i.ipAddress != null) interfaces)
+    map (i: i.name) (filter (i: if i.useDHCP != null then !i.useDHCP else i.ipv4.addresses != [ ]) interfaces)
     ++ mapAttrsToList (i: _: i) config.networking.sits
     ++ concatLists (attrValues (mapAttrs (n: v: v.interfaces) config.networking.bridges))
     ++ concatLists (attrValues (mapAttrs (n: v: v.interfaces) config.networking.vswitches))
@@ -156,11 +156,11 @@ in
     systemd.services.dhcpcd = let
       cfgN = config.networking;
       hasDefaultGatewaySet = (cfgN.defaultGateway != null && cfgN.defaultGateway.address != "")
-                          || (cfgN.defaultGateway6 != null && cfgN.defaultGateway6.address != "");
+                          && (!cfgN.enableIPv6 || (cfgN.defaultGateway6 != null && cfgN.defaultGateway6.address != ""));
     in
       { description = "DHCP Client";
 
-        wantedBy = optional (!hasDefaultGatewaySet) "network-online.target";
+        wantedBy = [ "multi-user.target" ] ++ optional (!hasDefaultGatewaySet) "network-online.target";
         after = [ "network.target" ];
         wants = [ "network.target" ];
 
diff --git a/nixos/modules/services/networking/dhcpd.nix b/nixos/modules/services/networking/dhcpd.nix
index 2eac6dfec5b7..fd7e317eee95 100644
--- a/nixos/modules/services/networking/dhcpd.nix
+++ b/nixos/modules/services/networking/dhcpd.nix
@@ -36,6 +36,7 @@ let
 
       preStart = ''
         mkdir -m 755 -p ${cfg.stateDir}
+        chown dhcpd:nogroup ${cfg.stateDir}
         touch ${cfg.stateDir}/dhcpd.leases
       '';
 
diff --git a/nixos/modules/services/networking/dnscache.nix b/nixos/modules/services/networking/dnscache.nix
index 379203cd1ab6..ba5c8e2d5e53 100644
--- a/nixos/modules/services/networking/dnscache.nix
+++ b/nixos/modules/services/networking/dnscache.nix
@@ -9,12 +9,12 @@ let
     mkdir -p $out/{servers,ip}
 
     ${concatMapStrings (ip: ''
-      echo > "$out/ip/"${lib.escapeShellArg ip}
+      touch "$out/ip/"${lib.escapeShellArg ip}
     '') cfg.clientIps}
 
     ${concatStrings (mapAttrsToList (host: ips: ''
       ${concatMapStrings (ip: ''
-        echo ${lib.escapeShellArg ip} > "$out/servers/"${lib.escapeShellArg host}
+        echo ${lib.escapeShellArg ip} >> "$out/servers/"${lib.escapeShellArg host}
       '') ips}
     '') cfg.domainServers)}
 
@@ -34,33 +34,49 @@ in {
 
   options = {
     services.dnscache = {
+
       enable = mkOption {
         default = false;
         type = types.bool;
-        description = "Whether to run the dnscache caching dns server";
+        description = "Whether to run the dnscache caching dns server.";
       };
 
       ip = mkOption {
         default = "0.0.0.0";
         type = types.str;
-        description = "IP address on which to listen for connections";
+        description = "IP address on which to listen for connections.";
       };
 
       clientIps = mkOption {
         default = [ "127.0.0.1" ];
         type = types.listOf types.str;
-        description = "client IP addresses (or prefixes) from which to accept connections";
+        description = "Client IP addresses (or prefixes) from which to accept connections.";
         example = ["192.168" "172.23.75.82"];
       };
 
       domainServers = mkOption {
         default = { };
         type = types.attrsOf (types.listOf types.str);
-        description = "table of {hostname: server} pairs to use as authoritative servers for hosts (and subhosts)";
+        description = ''
+          Table of {hostname: server} pairs to use as authoritative servers for hosts (and subhosts).
+          If no entry for @ is specified, a predefined list of root servers is used.
+        '';
         example = {
-          "example.com" = ["8.8.8.8" "8.8.4.4"];
+          "@" = ["8.8.8.8" "8.8.4.4"];
+          "example.com" = ["192.168.100.100"];
         };
       };
+
+      forwardOnly = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Whether to treat the servers listed for @ as caching
+          servers, forwarding queries to them the same way a client would
+          instead of resolving iteratively from the root servers. This is
+          needed if you want to use e.g. Google DNS as your upstream DNS.
+        '';
+      };
+
     };
   };
 
@@ -82,6 +98,7 @@ in {
       '';
       script = ''
         cd /var/lib/dnscache/
+        ${optionalString cfg.forwardOnly "export FORWARDONLY=1"}
         exec ./run
       '';
     };
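
A sketch combining the new forwardOnly switch with an "@" entry, turning dnscache into a pure forwarder for the listed upstreams while still answering an internal zone from its own server (addresses follow the option examples):

    # Hypothetical forwarding-only cache for a local network.
    services.dnscache = {
      enable = true;
      clientIps = [ "127.0.0.1" "192.168" ];
      forwardOnly = true;                        # query the "@" servers like a client would
      domainServers = {
        "@" = [ "8.8.8.8" "8.8.4.4" ];           # upstream resolvers
        "example.com" = [ "192.168.100.100" ];   # internal authoritative server
      };
    };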
diff --git a/nixos/modules/services/networking/dnscrypt-proxy.nix b/nixos/modules/services/networking/dnscrypt-proxy.nix
index ed658258c7f9..857657eea4db 100644
--- a/nixos/modules/services/networking/dnscrypt-proxy.nix
+++ b/nixos/modules/services/networking/dnscrypt-proxy.nix
@@ -10,7 +10,7 @@ let
   # This is somewhat more flexible than preloading the key as an
   # embedded string.
   upstreamResolverListPubKey = pkgs.fetchurl {
-    url = https://raw.githubusercontent.com/jedisct1/dnscrypt-proxy/master/minisign.pub;
+    url = https://raw.githubusercontent.com/dyne/dnscrypt-proxy/master/minisign.pub;
     sha256 = "18lnp8qr6ghfc2sd46nn1rhcpr324fqlvgsp4zaigw396cd7vnnh";
   };
 
@@ -258,9 +258,9 @@ in
         domain=raw.githubusercontent.com
         get="curl -fSs --resolve $domain:443:$(hostip -r 8.8.8.8 $domain | head -1)"
         $get -o dnscrypt-resolvers.csv.tmp \
-          https://$domain/jedisct1/dnscrypt-proxy/master/dnscrypt-resolvers.csv
+          https://$domain/dyne/dnscrypt-proxy/master/dnscrypt-resolvers.csv
         $get -o dnscrypt-resolvers.csv.minisig.tmp \
-          https://$domain/jedisct1/dnscrypt-proxy/master/dnscrypt-resolvers.csv.minisig
+          https://$domain/dyne/dnscrypt-proxy/master/dnscrypt-resolvers.csv.minisig
         mv dnscrypt-resolvers.csv.minisig{.tmp,}
         if ! minisign -q -V -p ${upstreamResolverListPubKey} \
           -m dnscrypt-resolvers.csv.tmp -x dnscrypt-resolvers.csv.minisig ; then
diff --git a/nixos/modules/services/networking/dnscrypt-wrapper.nix b/nixos/modules/services/networking/dnscrypt-wrapper.nix
index 23cc92946e41..bf13d5c6f5fe 100644
--- a/nixos/modules/services/networking/dnscrypt-wrapper.nix
+++ b/nixos/modules/services/networking/dnscrypt-wrapper.nix
@@ -145,6 +145,16 @@ in {
     };
     users.groups.dnscrypt-wrapper = { };
 
+    security.polkit.extraConfig = ''
+      // Allow dnscrypt-wrapper user to restart dnscrypt-wrapper.service
+      polkit.addRule(function(action, subject) {
+          if (action.id == "org.freedesktop.systemd1.manage-units" &&
+              action.lookup("unit") == "dnscrypt-wrapper.service" &&
+              subject.user == "dnscrypt-wrapper") {
+              return polkit.Result.YES;
+          }
+        });
+    '';
 
     systemd.services.dnscrypt-wrapper = {
       description = "dnscrypt-wrapper daemon";
diff --git a/nixos/modules/services/networking/firefox/sync-server.nix b/nixos/modules/services/networking/firefox/sync-server.nix
index a9f3fd65d76b..97d223a56cab 100644
--- a/nixos/modules/services/networking/firefox/sync-server.nix
+++ b/nixos/modules/services/networking/firefox/sync-server.nix
@@ -33,6 +33,8 @@ let
 in
 
 {
+  meta.maintainers = with lib.maintainers; [ nadrieril ];
+
   options = {
     services.firefox.syncserver = {
       enable = mkOption {
@@ -70,18 +72,6 @@ in
         '';
       };
 
-      user = mkOption {
-        type = types.str;
-        default = "syncserver";
-        description = "User account under which syncserver runs.";
-      };
-
-      group = mkOption {
-        type = types.str;
-        default = "syncserver";
-        description = "Group account under which syncserver runs.";
-      };
-
       publicUrl = mkOption {
         type = types.str;
         default = "http://localhost:5000/";
@@ -137,7 +127,9 @@ in
   config = mkIf cfg.enable {
 
     systemd.services.syncserver = let
-      syncServerEnv = pkgs.python.withPackages(ps: with ps; [ syncserver pasteScript ]);
+      syncServerEnv = pkgs.python.withPackages(ps: with ps; [ syncserver pasteScript requests ]);
+      user = "syncserver";
+      group = "syncserver";
     in {
       after = [ "network.target" ];
       description = "Firefox Sync Server";
@@ -145,43 +137,43 @@ in
       path = [ pkgs.coreutils syncServerEnv ];
 
       serviceConfig = {
-        User = cfg.user;
-        Group = cfg.group;
+        User = user;
+        Group = group;
         PermissionsStartOnly = true;
       };
 
       preStart = ''
         if ! test -e ${cfg.privateConfig}; then
-          mkdir -m 700 -p $(dirname ${cfg.privateConfig})
+          mkdir -p $(dirname ${cfg.privateConfig})
           echo  > ${cfg.privateConfig} '[syncserver]'
+          chmod 600 ${cfg.privateConfig}
           echo >> ${cfg.privateConfig} "secret = $(head -c 20 /dev/urandom | sha1sum | tr -d ' -')"
         fi
-        chown ${cfg.user}:${cfg.group} ${cfg.privateConfig}
+        chmod 600 ${cfg.privateConfig}
+        chmod 755 $(dirname ${cfg.privateConfig})
+        chown ${user}:${group} ${cfg.privateConfig}
+
       '' + optionalString (cfg.sqlUri == defaultSqlUri) ''
         if ! test -e $(dirname ${defaultDbLocation}); then
           mkdir -m 700 -p $(dirname ${defaultDbLocation})
-          chown ${cfg.user}:${cfg.group} $(dirname ${defaultDbLocation})
+          chown ${user}:${group} $(dirname ${defaultDbLocation})
         fi
+
         # Move previous database file if it exists
         oldDb="/var/db/firefox-sync-server.db"
         if test -f $oldDb; then
           mv $oldDb ${defaultDbLocation}
-          chown ${cfg.user}:${cfg.group} ${defaultDbLocation}
+          chown ${user}:${group} ${defaultDbLocation}
         fi
       '';
       serviceConfig.ExecStart = "${syncServerEnv}/bin/paster serve ${syncServerIni}";
     };
 
-    users.extraUsers = optionalAttrs (cfg.user == "syncserver")
-      (singleton {
-        name = "syncserver";
-        group = cfg.group;
-        isSystemUser = true;
-      });
-
-    users.extraGroups = optionalAttrs (cfg.group == "syncserver")
-      (singleton {
-        name = "syncserver";
-      });
+    users.users.syncserver = {
+      group = "syncserver";
+      isSystemUser = true;
+    };
+
+    users.groups.syncserver = {};
   };
 }
diff --git a/nixos/modules/services/networking/firewall.nix b/nixos/modules/services/networking/firewall.nix
index 9bd88ca1707b..20c0b0acf165 100644
--- a/nixos/modules/services/networking/firewall.nix
+++ b/nixos/modules/services/networking/firewall.nix
@@ -54,7 +54,7 @@ let
     '';
 
   writeShScript = name: text: let dir = pkgs.writeScriptBin name ''
-    #! ${pkgs.stdenv.shell} -e
+    #! ${pkgs.runtimeShell} -e
     ${text}
   ''; in "${dir}/bin/${name}";
 
@@ -125,6 +125,9 @@ let
       ip46tables -t raw -N nixos-fw-rpfilter 2> /dev/null || true
       ip46tables -t raw -A nixos-fw-rpfilter -m rpfilter ${optionalString (cfg.checkReversePath == "loose") "--loose"} -j RETURN
 
+      # Allows this host to act as a DHCPv4 client without first having to use APIPA
+      iptables -t raw -A nixos-fw-rpfilter -p udp --sport 67 --dport 68 -j RETURN
+
       # Allows this host to act as a DHCPv4 server
       iptables -t raw -A nixos-fw-rpfilter -s 0.0.0.0 -d 255.255.255.255 -p udp --sport 68 --dport 67 -j RETURN
 
diff --git a/nixos/modules/services/networking/flashpolicyd.nix b/nixos/modules/services/networking/flashpolicyd.nix
index 5ba85178179b..5b83ce131389 100644
--- a/nixos/modules/services/networking/flashpolicyd.nix
+++ b/nixos/modules/services/networking/flashpolicyd.nix
@@ -22,7 +22,7 @@ let
 
   flashpolicydWrapper = pkgs.writeScriptBin "flashpolicyd"
     ''
-      #! ${pkgs.stdenv.shell}
+      #! ${pkgs.runtimeShell}
       exec ${flashpolicyd}/Perl_xinetd/in.flashpolicyd.pl \
         --file=${pkgs.writeText "flashpolixy.xml" cfg.policy} \
         2> /dev/null
diff --git a/nixos/modules/services/networking/freeradius.nix b/nixos/modules/services/networking/freeradius.nix
new file mode 100644
index 000000000000..45cba1ce2770
--- /dev/null
+++ b/nixos/modules/services/networking/freeradius.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.freeradius;
+
+  freeradiusService = cfg:
+  {
+    description = "FreeRadius server";
+    wantedBy = ["multi-user.target"];
+    after = ["network-online.target"];
+    wants = ["network-online.target"];
+    preStart = ''
+      ${pkgs.freeradius}/bin/radiusd -C -d ${cfg.configDir} -l stdout
+    '';
+
+    serviceConfig = {
+        ExecStart = "${pkgs.freeradius}/bin/radiusd -f -d ${cfg.configDir} -l stdout -xx";
+        ExecReload = [
+          "${pkgs.freeradius}/bin/radiusd -C -d ${cfg.configDir} -l stdout"
+          "${pkgs.coreutils}/bin/kill -HUP $MAINPID"
+        ];
+        User = "radius";
+        ProtectSystem = "full";
+        ProtectHome = "on";
+        Restart = "on-failure";
+        RestartSec = 2;
+    };
+  };
+
+  freeradiusConfig = {
+    enable = mkEnableOption "the freeradius server";
+
+    configDir = mkOption {
+      type = types.path;
+      default = "/etc/raddb";
+      description = ''
+        The path of the freeradius server configuration directory.
+      '';
+    };
+
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.freeradius = freeradiusConfig;
+  };
+
+
+  ###### implementation
+
+  config = mkIf (cfg.enable) {
+
+    users = {
+      extraUsers.radius = {
+        /*uid = config.ids.uids.radius;*/
+        description = "Radius daemon user";
+      };
+    };
+
+    systemd.services.freeradius = freeradiusService cfg;
+
+  };
+
+}
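
The module only points radiusd at an existing configuration directory and validates it with radiusd -C before starting, so a working FreeRADIUS tree has to be provided separately. A sketch under that assumption:

    # Hypothetical: enable the server against a pre-populated config tree.
    services.freeradius = {
      enable = true;
      configDir = "/etc/raddb";   # module default; must already contain radiusd.conf etc.
    };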
diff --git a/nixos/modules/services/networking/gnunet.nix b/nixos/modules/services/networking/gnunet.nix
index 03ee54af4334..02cd53c6fa38 100644
--- a/nixos/modules/services/networking/gnunet.nix
+++ b/nixos/modules/services/networking/gnunet.nix
@@ -137,6 +137,8 @@ in
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       path = [ pkgs.gnunet pkgs.miniupnpc ];
+      environment.TMPDIR = "/tmp";
+      serviceConfig.PrivateTmp = true;
       serviceConfig.ExecStart = "${pkgs.gnunet}/lib/gnunet/libexec/gnunet-service-arm -c ${configFile}";
       serviceConfig.User = "gnunet";
       serviceConfig.UMask = "0007";
diff --git a/nixos/modules/services/networking/i2pd.nix b/nixos/modules/services/networking/i2pd.nix
index ca2e2a065dcf..8f5aeee4a16b 100644
--- a/nixos/modules/services/networking/i2pd.nix
+++ b/nixos/modules/services/networking/i2pd.nix
@@ -126,6 +126,7 @@ let
         [${tun.name}]
         type = client
         destination = ${tun.destination}
+        destinationport = ${toString tun.destinationPort}
         keys = ${tun.keys}
         address = ${tun.address}
         port = ${toString tun.port}
@@ -137,15 +138,15 @@ let
       '')
     }
     ${flip concatMapStrings
-      (collect (tun: tun ? port && tun ? host) cfg.inTunnels)
-      (tun: let portStr = toString tun.port; in ''
+      (collect (tun: tun ? port && tun ? address) cfg.inTunnels)
+      (tun: ''
         [${tun.name}]
         type = server
         destination = ${tun.destination}
         keys = ${tun.keys}
         host = ${tun.address}
-        port = ${tun.port}
-        inport = ${tun.inPort}
+        port = ${toString tun.port}
+        inport = ${toString tun.inPort}
         accesslist = ${builtins.concatStringsSep "," tun.accessList}
       '')
     }
@@ -405,7 +406,13 @@ in
         default = {};
         type = with types; loaOf (submodule (
           { name, config, ... }: {
-            options = commonTunOpts name;
+            options = {
+              destinationPort = mkOption {
+                type = types.int;
+                default = 0;
+                description = "Connect to particular port at destination.";
+              };
+            } // commonTunOpts name;
             config = {
               name = mkDefault name;
             };
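
A sketch of a tunnel using the new destinationPort option; the enclosing services.i2pd.outTunnels attribute name and the destination are assumptions for illustration, while the per-tunnel fields (destination, keys, address, port) mirror the template rendered above:

    # Hypothetical client tunnel exposing a remote I2P SMTP service locally.
    services.i2pd.outTunnels.smtp = {
      destination = "smtp.postman.i2p";   # assumed destination
      destinationPort = 25;               # new option: port at the destination
      address = "127.0.0.1";              # local bind address
      port = 7659;                        # local listen port
      keys = "smtp-keys.dat";
    };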
diff --git a/nixos/modules/services/networking/iwd.nix b/nixos/modules/services/networking/iwd.nix
index 23787bce9911..344212ad8329 100644
--- a/nixos/modules/services/networking/iwd.nix
+++ b/nixos/modules/services/networking/iwd.nix
@@ -26,7 +26,7 @@ in {
       wants = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
 
-      serviceConfig.ExecStart = "${pkgs.iwd}/bin/iwd";
+      serviceConfig.ExecStart = "${pkgs.iwd}/libexec/iwd";
     };
   };
 
diff --git a/nixos/modules/services/networking/kresd.nix b/nixos/modules/services/networking/kresd.nix
index 18e2ab9aebf1..aac02b811d71 100644
--- a/nixos/modules/services/networking/kresd.nix
+++ b/nixos/modules/services/networking/kresd.nix
@@ -43,7 +43,16 @@ in
       type = with types; listOf str;
       default = [ "::1" "127.0.0.1" ];
       description = ''
-        What addresses the server should listen on.
+        What addresses the server should listen on. (UDP+TCP 53)
+      '';
+    };
+    listenTLS = mkOption {
+      type = with types; listOf str;
+      default = [];
+      example = [ "198.51.100.1:853" "[2001:db8::1]:853" "853" ];
+      description = ''
+        Addresses on which kresd should provide DNS over TLS (see RFC 7858).
+        For detailed syntax see ListenStream in man systemd.socket.
       '';
     };
     # TODO: perhaps options for more common stuff like cache size or forwarding
@@ -72,6 +81,19 @@ in
         (iface: if elem ":" (stringToCharacters iface) then "[${iface}]:53" else "${iface}:53")
         cfg.interfaces;
       socketConfig.ListenDatagram = listenStreams;
+      socketConfig.FreeBind = true;
+    };
+
+    systemd.sockets.kresd-tls = mkIf (cfg.listenTLS != []) rec {
+      wantedBy = [ "sockets.target" ];
+      before = wantedBy;
+      partOf = [ "kresd.socket" ];
+      listenStreams = cfg.listenTLS;
+      socketConfig = {
+        FileDescriptorName = "tls";
+        FreeBind = true;
+        Service = "kresd.service";
+      };
     };
 
     systemd.sockets.kresd-control = rec {
@@ -82,20 +104,11 @@ in
       socketConfig = {
         FileDescriptorName = "control";
         Service = "kresd.service";
-        SocketMode = "0660"; # only root user/group may connect
+        SocketMode = "0660"; # only root user/group may connect and control kresd
       };
     };
 
-    # Create the cacheDir; tmpfiles don't work on nixos-rebuild switch.
-    systemd.services.kresd-cachedir = {
-      serviceConfig.Type = "oneshot";
-      script = ''
-        if [ ! -d '${cfg.cacheDir}' ]; then
-          mkdir -p '${cfg.cacheDir}'
-          chown kresd:kresd '${cfg.cacheDir}'
-        fi
-      '';
-    };
+    systemd.tmpfiles.rules = [ "d '${cfg.cacheDir}' 0770 kresd kresd - -" ];
 
     systemd.services.kresd = {
       description = "Knot-resolver daemon";
@@ -104,16 +117,17 @@ in
         User = "kresd";
         Type = "notify";
         WorkingDirectory = cfg.cacheDir;
+        Restart = "on-failure";
+        Sockets = [ "kresd.socket" "kresd-control.socket" ]
+          ++ optional (cfg.listenTLS != []) "kresd-tls.socket";
       };
 
+      # Trust anchor goes from dns-root-data by default.
       script = ''
-        exec '${package}/bin/kresd' --config '${configFile}' \
-          -k '${cfg.cacheDir}/root.key'
+        exec '${package}/bin/kresd' --config '${configFile}' --forks=1
       '';
 
-      after = [ "kresd-cachedir.service" ];
-      requires = [ "kresd.socket" "kresd-cachedir.service" ];
-      wantedBy = [ "sockets.target" ];
+      requires = [ "kresd.socket" ];
     };
   };
 }
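
With the added kresd-tls.socket, plain DNS and DNS-over-TLS listeners are separate socket units feeding the same kresd.service. A sketch, assuming the module's usual enable option and reusing the addresses from the listenTLS example:

    # Hypothetical resolver offering DNS over TLS on port 853.
    services.kresd = {
      enable = true;
      interfaces = [ "::1" "127.0.0.1" ];                      # UDP+TCP 53 (module default)
      listenTLS = [ "198.51.100.1:853" "[2001:db8::1]:853" ];  # RFC 7858 listeners
    };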
diff --git a/nixos/modules/services/networking/lldpd.nix b/nixos/modules/services/networking/lldpd.nix
index ba4e1b1542fe..db1534edfd7c 100644
--- a/nixos/modules/services/networking/lldpd.nix
+++ b/nixos/modules/services/networking/lldpd.nix
@@ -24,6 +24,7 @@ in
       description = "lldpd user";
       group = "_lldpd";
       home = "/var/run/lldpd";
+      isSystemUser = true;
     };
     users.extraGroups._lldpd = {};
 
diff --git a/nixos/modules/services/networking/monero.nix b/nixos/modules/services/networking/monero.nix
new file mode 100644
index 000000000000..31379189f5de
--- /dev/null
+++ b/nixos/modules/services/networking/monero.nix
@@ -0,0 +1,238 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg     = config.services.monero;
+  dataDir = "/var/lib/monero";
+
+  listToConf = option: list:
+    concatMapStrings (value: "${option}=${value}\n") list;
+
+  login = (cfg.rpc.user != null && cfg.rpc.password != null);
+
+  configFile = with cfg; pkgs.writeText "monero.conf" ''
+    log-file=/dev/stdout
+    data-dir=${dataDir}
+
+    ${optionalString mining.enable ''
+      start-mining=${mining.address}
+      mining-threads=${toString mining.threads}
+    ''}
+
+    rpc-bind-ip=${rpc.address}
+    rpc-bind-port=${toString rpc.port}
+    ${optionalString login ''
+      rpc-login=${rpc.user}:${rpc.password}
+    ''}
+    ${optionalString rpc.restricted ''
+      restrict-rpc=1
+    ''}
+
+    limit-rate-up=${toString limits.upload}
+    limit-rate-down=${toString limits.download}
+    max-concurrency=${toString limits.threads}
+    block-sync-size=${toString limits.syncSize}
+
+    ${listToConf "add-peer" extraNodes}
+    ${listToConf "add-priority-node" priorityNodes}
+    ${listToConf "add-exclusive-node" exclusiveNodes}
+
+    ${extraConfig}
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.monero = {
+
+      enable = mkEnableOption "Monero node daemon.";
+
+      mining.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to mine moneroj.
+        '';
+      };
+
+      mining.address = mkOption {
+        type = types.str;
+        default = "";
+        description = ''
+          Monero address where to send mining rewards.
+        '';
+      };
+
+      mining.threads = mkOption {
+        type = types.addCheck types.int (x: x>=0);
+        default = 0;
+        description = ''
+          Number of threads used for mining.
+          Set to <literal>0</literal> to use all available.
+        '';
+      };
+
+      rpc.user = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          User name for RPC connections.
+        '';
+      };
+
+      rpc.password = mkOption {
+        type = types.str;
+        default = null;
+        description = ''
+          Password for RPC connections.
+        '';
+      };
+
+      rpc.address = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = ''
+          IP address the RPC server will bind to.
+        '';
+      };
+
+      rpc.port = mkOption {
+        type = types.int;
+        default = 18081;
+        description = ''
+          Port the RPC server will bind to.
+        '';
+      };
+
+      rpc.restricted = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to restrict RPC to view-only commands.
+        '';
+      };
+
+      limits.upload = mkOption {
+        type = types.addCheck types.int (x: x>=-1);
+        default = -1;
+        description = ''
+          Limit of the upload rate in kB/s.
+          Set to <literal>-1</literal> to leave unlimited.
+        '';
+      };
+
+      limits.download = mkOption {
+        type = types.addCheck types.int (x: x>=-1);
+        default = -1;
+        description = ''
+          Limit of the download rate in kB/s.
+          Set to <literal>-1</literal> to leave unlimited.
+        '';
+      };
+
+      limits.threads = mkOption {
+        type = types.addCheck types.int (x: x>=0);
+        default = 0;
+        description = ''
+          Maximum number of threads used for a parallel job.
+          Set to <literal>0</literal> to leave unlimited.
+        '';
+      };
+
+      limits.syncSize = mkOption {
+        type = types.addCheck types.int (x: x>=0);
+        default = 0;
+        description = ''
+          Maximum number of blocks to sync at once.
+          Set to <literal>0</literal> for adaptive.
+        '';
+      };
+
+      extraNodes = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = ''
+          List of additional peer IP addresses to add to the local list.
+        '';
+      };
+
+      priorityNodes = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = ''
+          List of peer IP addresses to connect to and
+          attempt to keep the connection open.
+        '';
+      };
+
+      exclusiveNodes = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = ''
+          List of peer IP addresses to connect to *only*.
+          If given, the other peer options will be ignored.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra lines to be added verbatim to monerod configuration.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.extraUsers = singleton {
+      name = "monero";
+      uid  = config.ids.uids.monero;
+      description = "Monero daemon user";
+      home = dataDir;
+      createHome = true;
+    };
+
+    users.extraGroups = singleton {
+      name = "monero";
+      gid  = config.ids.gids.monero;
+    };
+
+    systemd.services.monero = {
+      description = "monero daemon";
+      after    = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User  = "monero";
+        Group = "monero";
+        ExecStart = "${pkgs.monero}/bin/monerod --config-file=${configFile} --non-interactive";
+        Restart = "always";
+        SuccessExitStatus = [ 0 1 ];
+      };
+    };
+
+    assertions = singleton {
+      assertion = cfg.mining.enable -> cfg.mining.address != "";
+      message   = ''
+        You need a Monero address to receive mining rewards:
+        specify one using the option services.monero.mining.address.
+      '';
+    };
+
+  };
+
+}
+
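A minimal usage sketch for the services.monero module added above, using only options it declares (the wallet address and numeric values are illustrative placeholders):

  services.monero = {
    enable = true;
    mining = {
      enable  = true;
      address = "<your-wallet-address>";   # placeholder wallet address
      threads = 2;
    };
    rpc = {
      address    = "127.0.0.1";
      port       = 18081;
      restricted = true;
    };
    limits.download = 10000;               # kB/s
  };
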
diff --git a/nixos/modules/services/networking/mosquitto.nix b/nixos/modules/services/networking/mosquitto.nix
index 81915b5a2ef8..d8135f4d0ffa 100644
--- a/nixos/modules/services/networking/mosquitto.nix
+++ b/nixos/modules/services/networking/mosquitto.nix
@@ -12,6 +12,10 @@ let
     keyfile ${cfg.ssl.keyfile}
   '';
 
+  passwordConf = optionalString cfg.checkPasswords ''
+    password_file ${cfg.dataDir}/passwd
+  '';
+
   mosquittoConf = pkgs.writeText "mosquitto.conf" ''
     pid_file /run/mosquitto/pid
     acl_file ${aclFile}
@@ -19,6 +23,7 @@ let
     allow_anonymous ${boolToString cfg.allowAnonymous}
     bind_address ${cfg.host}
     port ${toString cfg.port}
+    ${passwordConf}
     ${listenerConf}
     ${cfg.extraConf}
   '';
@@ -153,6 +158,15 @@ in
         '';
       };
 
+      checkPasswords = mkOption {
+        default = false;
+        example = true;
+        type = types.bool;
+        description = ''
+          Refuse connection when clients provide incorrect passwords.
+        '';
+      };
+
       extraConf = mkOption {
         default = "";
         type = types.lines;
@@ -198,7 +212,7 @@ in
       '' + concatStringsSep "\n" (
         mapAttrsToList (n: c:
           if c.hashedPassword != null then
-            "echo '${n}:${c.hashedPassword}' > ${cfg.dataDir}/passwd"
+            "echo '${n}:${c.hashedPassword}' >> ${cfg.dataDir}/passwd"
           else optionalString (c.password != null)
             "${pkgs.mosquitto}/bin/mosquitto_passwd -b ${cfg.dataDir}/passwd ${n} ${c.password}"
         ) cfg.users);
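A short sketch of the new checkPasswords option together with a password-protected user, assuming the module's pre-existing enable and users options (the users.<name>.hashedPassword attribute is referenced in the hunk above; the hash is a placeholder):

  services.mosquitto = {
    enable = true;
    checkPasswords = true;
    users.alice = {
      hashedPassword = "<hash-from-mosquitto_passwd>";   # placeholder hash
      # other per-user options (e.g. acl) omitted
    };
  };
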
diff --git a/nixos/modules/services/networking/murmur.nix b/nixos/modules/services/networking/murmur.nix
index 13d7c3254f9d..873d62dbf341 100644
--- a/nixos/modules/services/networking/murmur.nix
+++ b/nixos/modules/services/networking/murmur.nix
@@ -80,7 +80,7 @@ in
 
       pidfile = mkOption {
         type = types.path;
-        default = "/tmp/murmurd.pid";
+        default = "/run/murmur/murmurd.pid";
         description = "Path to PID file for Murmur daemon.";
       };
 
@@ -252,6 +252,7 @@ in
 
       serviceConfig = {
         Type      = "forking";
+        RuntimeDirectory = "murmur";
         PIDFile   = cfg.pidfile;
         Restart   = "always";
         User      = "murmur";
diff --git a/nixos/modules/services/networking/nat.nix b/nixos/modules/services/networking/nat.nix
index 366bb2ed7a80..da3827c35e63 100644
--- a/nixos/modules/services/networking/nat.nix
+++ b/nixos/modules/services/networking/nat.nix
@@ -19,6 +19,8 @@ let
     iptables -w -t nat -D POSTROUTING -j nixos-nat-post 2>/dev/null || true
     iptables -w -t nat -F nixos-nat-post 2>/dev/null || true
     iptables -w -t nat -X nixos-nat-post 2>/dev/null || true
+
+    ${cfg.extraStopCommands}
   '';
 
   setupNat = ''
@@ -51,8 +53,40 @@ let
         -i ${cfg.externalInterface} -p ${fwd.proto} \
         --dport ${builtins.toString fwd.sourcePort} \
         -j DNAT --to-destination ${fwd.destination}
+
+      ${concatMapStrings (loopbackip:
+        let
+          m                = builtins.match "([0-9.]+):([0-9-]+)" fwd.destination;
+          destinationIP    = if (m == null) then throw "bad ip:ports `${fwd.destination}'" else elemAt m 0;
+          destinationPorts = if (m == null) then throw "bad ip:ports `${fwd.destination}'" else elemAt m 1;
+        in ''
+          # Allow connections to ${loopbackip}:${toString fwd.sourcePort} from the host itself
+          iptables -w -t nat -A OUTPUT \
+            -d ${loopbackip} -p ${fwd.proto} \
+            --dport ${builtins.toString fwd.sourcePort} \
+            -j DNAT --to-destination ${fwd.destination}
+
+          # Allow connections to ${loopbackip}:${toString fwd.sourcePort} from other hosts behind NAT
+          iptables -w -t nat -A nixos-nat-pre \
+            -d ${loopbackip} -p ${fwd.proto} \
+            --dport ${builtins.toString fwd.sourcePort} \
+            -j DNAT --to-destination ${fwd.destination}
+
+          iptables -w -t nat -A nixos-nat-post \
+            -d ${destinationIP} -p ${fwd.proto} \
+            --dport ${destinationPorts} \
+            -j SNAT --to-source ${loopbackip}
+        '') fwd.loopbackIPs}
     '') cfg.forwardPorts}
 
+    ${optionalString (cfg.dmzHost != null) ''
+      iptables -w -t nat -A nixos-nat-pre \
+        -i ${cfg.externalInterface} -j DNAT \
+        --to-destination ${cfg.dmzHost}
+    ''}
+
+    ${cfg.extraCommands}
+
     # Append our chains to the nat tables
     iptables -w -t nat -A PREROUTING -j nixos-nat-pre
     iptables -w -t nat -A POSTROUTING -j nixos-nat-post
@@ -125,15 +159,15 @@ in
       type = with types; listOf (submodule {
         options = {
           sourcePort = mkOption {
-            type = types.int;
+            type = types.either types.int (types.strMatching "[[:digit:]]+:[[:digit:]]+");
             example = 8080;
-            description = "Source port of the external interface";
+            description = "Source port of the external interface; to specify a port range, use a string with a colon (e.g. \"60000:61000\")";
           };
 
           destination = mkOption {
             type = types.str;
             example = "10.0.0.1:80";
-            description = "Forward connection to destination ip:port";
+            description = "Forward connection to destination ip:port; to specify a port range, use ip:start-end";
           };
 
           proto = mkOption {
@@ -142,6 +176,13 @@ in
             example = "udp";
             description = "Protocol of forwarded connection";
           };
+
+          loopbackIPs = mkOption {
+            type = types.listOf types.str;
+            default = [];
+            example = literalExample ''[ "55.1.2.3" ]'';
+            description = "Public IPs for NAT reflection; for connections to `loopbackip:sourcePort' from the host itself and from other hosts behind NAT";
+          };
         };
       });
       default = [];
@@ -153,6 +194,39 @@ in
         '';
     };
 
+    networking.nat.dmzHost = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.0.0.1";
+      description =
+        ''
+          The local IP address to which all traffic that does not match any
+          forwarding rule is forwarded.
+        '';
+    };
+
+    networking.nat.extraCommands = mkOption {
+      type = types.lines;
+      default = "";
+      example = "iptables -A INPUT -p icmp -j ACCEPT";
+      description =
+        ''
+          Additional shell commands executed as part of the nat
+          initialisation script.
+        '';
+    };
+
+    networking.nat.extraStopCommands = mkOption {
+      type = types.lines;
+      default = "";
+      example = "iptables -D INPUT -p icmp -j ACCEPT || true";
+      description =
+        ''
+          Additional shell commands executed as part of the nat
+          teardown script.
+        '';
+    };
+
   };
 
 
diff --git a/nixos/modules/services/networking/networkmanager.nix b/nixos/modules/services/networking/networkmanager.nix
index 6bdae32f72bb..10e96eb40362 100644
--- a/nixos/modules/services/networking/networkmanager.nix
+++ b/nixos/modules/services/networking/networkmanager.nix
@@ -133,10 +133,9 @@ in {
       basePackages = mkOption {
         type = types.attrsOf types.package;
         default = { inherit networkmanager modemmanager wpa_supplicant
-                            networkmanager_openvpn networkmanager_vpnc
-                            networkmanager_openconnect networkmanager_fortisslvpn
-                            networkmanager_pptp networkmanager_l2tp
-                            networkmanager_iodine; };
+                            networkmanager-openvpn networkmanager-vpnc
+                            networkmanager-openconnect networkmanager-fortisslvpn
+                            networkmanager-l2tp networkmanager-iodine; };
         internal = true;
       };
 
@@ -241,6 +240,19 @@ in {
           A list of scripts which will be executed in response to  network  events.
         '';
       };
+
+      enableStrongSwan = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable the StrongSwan plugin.
+          </para><para>
+          If you enable this option, the
+          <literal>networkmanager_strongswan</literal> plugin will be added to
+          the <option>networking.networkmanager.packages</option> option
+          so you don't need to do that yourself.
+        '';
+      };
     };
   };
 
@@ -254,34 +266,29 @@ in {
       message = "You can not use networking.networkmanager with networking.wireless";
     }];
 
-    boot.kernelModules = [ "ppp_mppe" ]; # Needed for most (all?) PPTP VPN connections.
-
     environment.etc = with cfg.basePackages; [
       { source = configFile;
         target = "NetworkManager/NetworkManager.conf";
       }
-      { source = "${networkmanager_openvpn}/etc/NetworkManager/VPN/nm-openvpn-service.name";
+      { source = "${networkmanager-openvpn}/etc/NetworkManager/VPN/nm-openvpn-service.name";
         target = "NetworkManager/VPN/nm-openvpn-service.name";
       }
-      { source = "${networkmanager_vpnc}/etc/NetworkManager/VPN/nm-vpnc-service.name";
+      { source = "${networkmanager-vpnc}/etc/NetworkManager/VPN/nm-vpnc-service.name";
         target = "NetworkManager/VPN/nm-vpnc-service.name";
       }
-      { source = "${networkmanager_openconnect}/etc/NetworkManager/VPN/nm-openconnect-service.name";
+      { source = "${networkmanager-openconnect}/etc/NetworkManager/VPN/nm-openconnect-service.name";
         target = "NetworkManager/VPN/nm-openconnect-service.name";
       }
-      { source = "${networkmanager_fortisslvpn}/etc/NetworkManager/VPN/nm-fortisslvpn-service.name";
+      { source = "${networkmanager-fortisslvpn}/etc/NetworkManager/VPN/nm-fortisslvpn-service.name";
         target = "NetworkManager/VPN/nm-fortisslvpn-service.name";
       }
-      { source = "${networkmanager_pptp}/etc/NetworkManager/VPN/nm-pptp-service.name";
-        target = "NetworkManager/VPN/nm-pptp-service.name";
-      }
-      { source = "${networkmanager_l2tp}/etc/NetworkManager/VPN/nm-l2tp-service.name";
+      { source = "${networkmanager-l2tp}/etc/NetworkManager/VPN/nm-l2tp-service.name";
         target = "NetworkManager/VPN/nm-l2tp-service.name";
       }
       { source = "${networkmanager_strongswan}/etc/NetworkManager/VPN/nm-strongswan-service.name";
         target = "NetworkManager/VPN/nm-strongswan-service.name";
       }
-      { source = "${networkmanager_iodine}/etc/NetworkManager/VPN/nm-iodine-service.name";
+      { source = "${networkmanager-iodine}/etc/NetworkManager/VPN/nm-iodine-service.name";
         target = "NetworkManager/VPN/nm-iodine-service.name";
       }
     ] ++ optional (cfg.appendNameservers == [] || cfg.insertNameservers == [])
@@ -322,6 +329,7 @@ in {
 
       preStart = ''
         mkdir -m 700 -p /etc/NetworkManager/system-connections
+        mkdir -m 700 -p /etc/ipsec.d
         mkdir -m 755 -p ${stateDirs}
       '';
     };
@@ -333,13 +341,13 @@ in {
       wireless.enable = lib.mkDefault false;
     };
 
-    powerManagement.resumeCommands = ''
-      ${config.systemd.package}/bin/systemctl restart network-manager
-    '';
-
     security.polkit.extraConfig = polkitConf;
 
-    services.dbus.packages = cfg.packages;
+    networking.networkmanager.packages =
+      mkIf cfg.enableStrongSwan [ pkgs.networkmanager_strongswan ];
+
+    services.dbus.packages =
+      optional cfg.enableStrongSwan pkgs.strongswanNM ++ cfg.packages;
 
     services.udev.packages = cfg.packages;
   };
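The new enableStrongSwan option in context (enable is assumed to be the module's pre-existing switch):

  networking.networkmanager = {
    enable = true;
    enableStrongSwan = true;   # adds networkmanager_strongswan to networking.networkmanager.packages
  };
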
diff --git a/nixos/modules/services/networking/nftables.nix b/nixos/modules/services/networking/nftables.nix
index 56b942054140..ad7c013a5449 100644
--- a/nixos/modules/services/networking/nftables.nix
+++ b/nixos/modules/services/networking/nftables.nix
@@ -116,7 +116,7 @@ in
           include "${cfg.rulesetFile}"
         '';
         checkScript = pkgs.writeScript "nftables-check" ''
-          #! ${pkgs.stdenv.shell} -e
+          #! ${pkgs.runtimeShell} -e
           if $(${pkgs.kmod}/bin/lsmod | grep -q ip_tables); then
             echo "Unload ip_tables before using nftables!" 1>&2
             exit 1
diff --git a/nixos/modules/services/networking/nix-serve.nix b/nixos/modules/services/networking/nix-serve.nix
index 3e865e3b76a8..8499e7c0f7c4 100644
--- a/nixos/modules/services/networking/nix-serve.nix
+++ b/nixos/modules/services/networking/nix-serve.nix
@@ -55,6 +55,8 @@ in
       environment.NIX_SECRET_KEY_FILE = cfg.secretKeyFile;
 
       serviceConfig = {
+        Restart = "always";
+        RestartSec = "5s";
         ExecStart = "${pkgs.nix-serve}/bin/nix-serve " +
           "--listen ${cfg.bindAddress}:${toString cfg.port} ${cfg.extraParams}";
         User = "nix-serve";
diff --git a/nixos/modules/services/networking/nixops-dns.nix b/nixos/modules/services/networking/nixops-dns.nix
new file mode 100644
index 000000000000..2bb1263b7fa2
--- /dev/null
+++ b/nixos/modules/services/networking/nixops-dns.nix
@@ -0,0 +1,79 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  pkg = pkgs.nixops-dns;
+  cfg = config.services.nixops-dns;
+in
+
+{
+  options = {
+    services.nixops-dns = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable nixops-dns resolution of NixOps
+          virtual machines via dnsmasq and a fake domain name.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        description = ''
+          The user the nixops-dns daemon should run as.
+          This should be the same user that runs nixops and
+          has the .nixops directory in its home.
+        '';
+      };
+
+      domain = mkOption {
+        type = types.str;
+        description = ''
+          Fake domain name under which NixOps virtual machines are resolved.
+
+          For example, with the domain "ops", the machine "vm" resolves as "vm.ops".
+        '';
+        example = "ops";
+        default = "ops";
+      };
+
+      dnsmasq = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Enable dnsmasq forwarding to nixops-dns. This allows nixops-dns
+          to be used for `services.nixops-dns.domain` resolution while the
+          rest of the queries are forwarded to the original resolvers.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.nixops-dns = {
+      description = "nixops-dns: DNS server for resolving NixOps machines";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        ExecStart = "${pkg}/bin/nixops-dns --domain=.${cfg.domain}";
+      };
+    };
+
+    services.dnsmasq = mkIf cfg.dnsmasq {
+      enable = true;
+      resolveLocalQueries = true;
+      servers = [
+        "/${cfg.domain}/127.0.0.1#5300"
+      ];
+      extraConfig = ''
+        bind-interfaces
+        listen-address=127.0.0.1
+      '';
+    };
+
+  };
+}
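A minimal sketch of the new nixops-dns module (the user name is a placeholder):

  services.nixops-dns = {
    enable = true;
    user   = "alice";   # placeholder: the user that runs nixops and owns ~/.nixops
    domain = "ops";     # machines become resolvable as <name>.ops
  };
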
diff --git a/nixos/modules/services/networking/nsd.nix b/nixos/modules/services/networking/nsd.nix
index c8b8ed547ebb..4241e6fcceab 100644
--- a/nixos/modules/services/networking/nsd.nix
+++ b/nixos/modules/services/networking/nsd.nix
@@ -11,6 +11,8 @@ let
 
   # build nsd with the options needed for the given config
   nsdPkg = pkgs.nsd.override {
+    configFile = "${configFile}/nsd.conf";
+
     bind8Stats = cfg.bind8Stats;
     ipv6 = cfg.ipv6;
     ratelimit = cfg.ratelimit.enable;
@@ -788,6 +790,8 @@ in
 
   config = mkIf cfg.enable {
 
+    environment.systemPackages = [ nsdPkg ];
+
     users.extraGroups = singleton {
       name = username;
       gid = config.ids.gids.nsd;
@@ -845,4 +849,6 @@ in
     };
 
   };
+
+  meta.maintainers = with lib.maintainers; [ hrdinka ];
 }
diff --git a/nixos/modules/services/networking/openvpn.nix b/nixos/modules/services/networking/openvpn.nix
index 3fbf5a9f0227..a418839d22b8 100644
--- a/nixos/modules/services/networking/openvpn.nix
+++ b/nixos/modules/services/networking/openvpn.nix
@@ -50,6 +50,11 @@ let
               "up ${pkgs.writeScript "openvpn-${name}-up" upScript}"}
           ${optionalString (cfg.down != "" || cfg.updateResolvConf)
               "down ${pkgs.writeScript "openvpn-${name}-down" downScript}"}
+          ${optionalString (cfg.authUserPass != null)
+              "auth-user-pass ${pkgs.writeText "openvpn-credentials-${name}" ''
+                ${cfg.authUserPass.username}
+                ${cfg.authUserPass.password}
+              ''}"}
         '';
 
     in {
@@ -60,7 +65,7 @@ let
 
       path = [ pkgs.iptables pkgs.iproute pkgs.nettools ];
 
-      serviceConfig.ExecStart = "@${openvpn}/sbin/openvpn openvpn --config ${configFile}";
+      serviceConfig.ExecStart = "@${openvpn}/sbin/openvpn openvpn --suppress-timestamps --config ${configFile}";
       serviceConfig.Restart = "always";
       serviceConfig.Type = "notify";
     };
@@ -161,6 +166,29 @@ in
             '';
           };
 
+          authUserPass = mkOption {
+            default = null;
+            description = ''
+              This option can be used to store the username / password credentials
+              with the "auth-user-pass" authentication method.
+
+              WARNING: Using this option will put the credentials WORLD-READABLE in the Nix store!
+            '';
+            type = types.nullOr (types.submodule {
+
+              options = {
+                username = mkOption {
+                  description = "The username to store inside the credentials file.";
+                  type = types.string;
+                };
+
+                password = mkOption {
+                  description = "The password to store inside the credentials file.";
+                  type = types.string;
+                };
+              };
+            });
+          };
         };
 
       });
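A sketch of the new authUserPass option. The config option is assumed to be the module's pre-existing per-server option; path and credentials are placeholders, and as the warning above notes, they end up world-readable in the Nix store:

  services.openvpn.servers.office = {
    config = "config /root/openvpn/office.conf";   # placeholder path
    authUserPass = {
      username = "alice";       # placeholder
      password = "hunter2";     # placeholder; stored world-readable in the Nix store
    };
  };
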
diff --git a/nixos/modules/services/networking/prosody.nix b/nixos/modules/services/networking/prosody.nix
index fb9c9dc67f24..1b4f81f6b56e 100644
--- a/nixos/modules/services/networking/prosody.nix
+++ b/nixos/modules/services/networking/prosody.nix
@@ -10,98 +10,229 @@ let
 
     options = {
 
-      # TODO: require attribute
       key = mkOption {
-        type = types.str;
-        description = "Path to the key file";
+        type = types.path;
+        description = "Path to the key file.";
       };
 
-      # TODO: require attribute
+      # TODO: rename to certificate to match the prosody config
       cert = mkOption {
-        type = types.str;
-        description = "Path to the certificate file";
+        type = types.path;
+        description = "Path to the certificate file.";
       };
+
+      extraOptions = mkOption {
+        type = types.attrs;
+        default = {};
+        description = "Extra SSL configuration options.";
+      };
+
     };
   };
 
   moduleOpts = {
-
+    # Generally required
     roster = mkOption {
+      type = types.bool;
       default = true;
       description = "Allow users to have a roster";
     };
 
     saslauth = mkOption {
+      type = types.bool;
       default = true;
       description = "Authentication for clients and servers. Recommended if you want to log in.";
     };
 
     tls = mkOption {
+      type = types.bool;
       default = true;
       description = "Add support for secure TLS on c2s/s2s connections";
     };
 
     dialback = mkOption {
+      type = types.bool;
       default = true;
       description = "s2s dialback support";
     };
 
     disco = mkOption {
+      type = types.bool;
       default = true;
       description = "Service discovery";
     };
 
-    legacyauth = mkOption {
+    # Not essential, but recommended
+    carbons = mkOption {
+      type = types.bool;
       default = true;
-      description = "Legacy authentication. Only used by some old clients and bots";
+      description = "Keep multiple clients in sync";
     };
 
+    pep = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Enables users to publish their mood, activity, playing music and more";
+    };
+
+    private = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Private XML storage (for room bookmarks, etc.)";
+    };
+
+    blocklist = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Allow users to block communications with other users";
+    };
+
+    vcard = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Allow users to set vCards";
+    };
+
+    # Nice to have
     version = mkOption {
+      type = types.bool;
       default = true;
       description = "Replies to server version requests";
     };
 
     uptime = mkOption {
+      type = types.bool;
       default = true;
       description = "Report how long server has been running";
     };
 
     time = mkOption {
+      type = types.bool;
       default = true;
       description = "Let others know the time here on this server";
     };
 
     ping = mkOption {
+      type = types.bool;
       default = true;
       description = "Replies to XMPP pings with pongs";
     };
 
-    console = mkOption {
+    register = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Allow users to register on this server using a client and change passwords";
+    };
+
+    mam = mkOption {
+      type = types.bool;
       default = false;
-      description = "telnet to port 5582";
+      description = "Store messages in an archive and allow users to access it";
     };
 
+    # Admin interfaces
+    admin_adhoc = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Allows administration via an XMPP client that supports ad-hoc commands";
+    };
+
+    admin_telnet = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Opens telnet console interface on localhost port 5582";
+    };
+
+    # HTTP modules
     bosh = mkOption {
+      type = types.bool;
       default = false;
       description = "Enable BOSH clients, aka 'Jabber over HTTP'";
     };
 
-    httpserver = mkOption {
+    websocket = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Enable WebSocket support";
+    };
+
+    http_files = mkOption {
+      type = types.bool;
       default = false;
       description = "Serve static files from a directory over HTTP";
     };
 
-    websocket = mkOption {
+    # Other specific functionality
+    limits = mkOption {
+      type = types.bool;
       default = false;
-      description = "Enable WebSocket support";
+      description = "Enable bandwidth limiting for XMPP connections";
+    };
+
+    groups = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Shared roster support";
+    };
+
+    server_contact_info = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Publish contact information for this service";
+    };
+
+    announce = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Send announcement to all online users";
+    };
+
+    welcome = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Welcome users who register accounts";
+    };
+
+    watchregistrations = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Alert admins of registrations";
+    };
+
+    motd = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Send a message to users when they log in";
+    };
+
+    legacyauth = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Legacy authentication. Only used by some old clients and bots";
+    };
+
+    proxy65 = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Enables a file transfer proxy service which clients behind NAT can use";
     };
 
   };
 
-  createSSLOptsStr = o:
-    if o ? key && o ? cert then
-      ''ssl = { key = "${o.key}"; certificate = "${o.cert}"; };''
-    else "";
+  toLua = x:
+    if builtins.isString x then ''"${x}"''
+    else if builtins.isBool x then (if x == true then "true" else "false")
+    else if builtins.isInt x then toString x
+    else if builtins.isList x then ''{ ${lib.concatStringsSep ", " (map (n: toLua n) x) } }''
+    else throw "Invalid Lua value";
+
+  createSSLOptsStr = o: ''
+    ssl = {
+      key = "${o.key}";
+      certificate = "${o.cert}";
+      ${concatStringsSep "\n" (mapAttrsToList (name: value: "${name} = ${toLua value};") o.extraOptions)}
+    };
+  '';
 
   vHostOpts = { ... }: {
 
@@ -114,18 +245,20 @@ let
       };
 
       enabled = mkOption {
+        type = types.bool;
         default = false;
         description = "Whether to enable the virtual host";
       };
 
       ssl = mkOption {
-        description = "Paths to SSL files";
+        type = types.nullOr (types.submodule sslOpts);
         default = null;
-        options = [ sslOpts ];
+        description = "Paths to SSL files";
       };
 
       extraConfig = mkOption {
-        default = '''';
+        type = types.lines;
+        default = "";
         description = "Additional virtual host specific configuration";
       };
 
@@ -144,20 +277,113 @@ in
     services.prosody = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = "Whether to enable the prosody server";
       };
 
+      package = mkOption {
+        type = types.package;
+        description = "Prosody package to use";
+        default = pkgs.prosody;
+        defaultText = "pkgs.prosody";
+        example = literalExample ''
+          pkgs.prosody.override {
+            withExtraLibs = [ pkgs.luaPackages.lpty ];
+            withCommunityModules = [ "auth_external" ];
+          };
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.string;
+        description = "Directory where Prosody stores its data";
+        default = "/var/lib/prosody";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "prosody";
+        description = "User account under which prosody runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "prosody";
+        description = "Group account under which prosody runs.";
+      };
+
       allowRegistration = mkOption {
+        type = types.bool;
         default = false;
         description = "Allow account creation";
       };
 
+      c2sRequireEncryption = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Force clients to use encrypted connections? This option will
+          prevent clients from authenticating unless they are using encryption.
+        '';
+      };
+
+      s2sRequireEncryption = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Force servers to use encrypted connections? This option will
+          prevent servers from authenticating unless they are using encryption.
+          Note that this is different from authentication.
+        '';
+      };
+
+      s2sSecureAuth = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Force certificate authentication for server-to-server connections?
+          This provides ideal security, but requires servers you communicate
+          with to support encryption AND present valid, trusted certificates.
+          For more information see https://prosody.im/doc/s2s#security
+        '';
+      };
+
+      s2sInsecureDomains = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "insecure.example.com" ];
+        description = ''
+          Some servers have invalid or self-signed certificates. You can list
+          remote domains here that will not be required to authenticate using
+          certificates. They will be authenticated using DNS instead, even
+          when s2s_secure_auth is enabled.
+        '';
+      };
+
+      s2sSecureDomains = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "jabber.org" ];
+        description = ''
+          Even if you leave s2s_secure_auth disabled, you can still require valid
+          certificates for some domains by specifying a list here.
+        '';
+      };
+
+
       modules = moduleOpts;
 
       extraModules = mkOption {
+        type = types.listOf types.str;
+        default = [];
         description = "Enable custom modules";
+      };
+
+      extraPluginPaths = mkOption {
+        type = types.listOf types.path;
         default = [];
+        description = "Addtional path in which to look find plugins/modules";
       };
 
       virtualHosts = mkOption {
@@ -183,20 +409,21 @@ in
       };
 
       ssl = mkOption {
-        description = "Paths to SSL files";
+        type = types.nullOr (types.submodule sslOpts);
         default = null;
-        options = [ sslOpts ];
+        description = "Paths to SSL files";
       };
 
       admins = mkOption {
-        description = "List of administrators of the current host";
-        example = [ "admin1@example.com" "admin2@example.com" ];
+        type = types.listOf types.str;
         default = [];
+        example = [ "admin1@example.com" "admin2@example.com" ];
+        description = "List of administrators of the current host";
       };
 
       extraConfig = mkOption {
         type = types.lines;
-        default = '''';
+        default = "";
         description = "Additional prosody configuration";
       };
 
@@ -208,37 +435,47 @@ in
 
   config = mkIf cfg.enable {
 
-    environment.systemPackages = [ pkgs.prosody ];
+    environment.systemPackages = [ cfg.package ];
 
     environment.etc."prosody/prosody.cfg.lua".text = ''
 
-      pidfile = "/var/lib/prosody/prosody.pid"
-
+      pidfile = "/run/prosody/prosody.pid"
 
       log = "*syslog"
 
-      data_path = "/var/lib/prosody"
-
-      allow_registration = ${boolToString cfg.allowRegistration};
-
-      ${ optionalString cfg.modules.console "console_enabled = true;" }
+      data_path = "${cfg.dataDir}"
+      plugin_paths = {
+        ${lib.concatStringsSep ", " (map (n: "\"${n}\"") cfg.extraPluginPaths) }
+      }
 
       ${ optionalString  (cfg.ssl != null) (createSSLOptsStr cfg.ssl) }
 
-      admins = { ${lib.concatStringsSep ", " (map (n: "\"${n}\"") cfg.admins) } };
+      admins = ${toLua cfg.admins}
+
+      -- we already build with libevent, so we can just enable it for a more performant server
+      use_libevent = true
 
       modules_enabled = {
 
         ${ lib.concatStringsSep "\n\ \ " (lib.mapAttrsToList
-          (name: val: optionalString val ''"${name}";'')
+          (name: val: optionalString val "${toLua name};")
         cfg.modules) }
+        ${ lib.concatStringsSep "\n" (map (x: "${toLua x};") cfg.package.communityModules)}
+        ${ lib.concatStringsSep "\n" (map (x: "${toLua x};") cfg.extraModules)}
+      };
 
-        ${ optionalString cfg.allowRegistration "\"register\"\;" }
+      allow_registration = ${toLua cfg.allowRegistration}
 
-        ${ lib.concatStringsSep "\n" (map (x: "\"${x}\";") cfg.extraModules)}
+      c2s_require_encryption = ${toLua cfg.c2sRequireEncryption}
+
+      s2s_require_encryption = ${toLua cfg.s2sRequireEncryption}
+
+      s2s_secure_auth = ${toLua cfg.s2sSecureAuth}
+
+      s2s_insecure_domains = ${toLua cfg.s2sInsecureDomains}
+
+      s2s_secure_domains = ${toLua cfg.s2sSecureDomains}
 
-        "posix";
-      };
 
       ${ cfg.extraConfig }
 
@@ -250,30 +487,32 @@ in
         '') cfg.virtualHosts) }
     '';
 
-    users.extraUsers.prosody = {
+    users.extraUsers.prosody = mkIf (cfg.user == "prosody") {
       uid = config.ids.uids.prosody;
       description = "Prosody user";
       createHome = true;
-      group = "prosody";
-      home = "/var/lib/prosody";
+      inherit (cfg) group;
+      home = "${cfg.dataDir}";
     };
 
-    users.extraGroups.prosody = {
+    users.extraGroups.prosody = mkIf (cfg.group == "prosody") {
       gid = config.ids.gids.prosody;
     };
 
     systemd.services.prosody = {
-
       description = "Prosody XMPP server";
       after = [ "network-online.target" ];
       wants = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ config.environment.etc."prosody/prosody.cfg.lua".source ];
       serviceConfig = {
-        User = "prosody";
-        PIDFile = "/var/lib/prosody/prosody.pid";
-        ExecStart = "${pkgs.prosody}/bin/prosodyctl start";
+        User = cfg.user;
+        Group = cfg.group;
+        Type = "forking";
+        RuntimeDirectory = [ "prosody" ];
+        PIDFile = "/run/prosody/prosody.pid";
+        ExecStart = "${cfg.package}/bin/prosodyctl start";
       };
-
     };
 
   };
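A sketch using the newly typed and added prosody options (certificate paths and domains are placeholders; enable and admins are shown in the hunks above):

  services.prosody = {
    enable = true;
    admins = [ "admin@example.com" ];
    s2sSecureDomains = [ "jabber.org" ];
    modules.mam = true;                                    # message archive management
    ssl = {
      cert = "/var/lib/acme/example.com/fullchain.pem";    # placeholder paths
      key  = "/var/lib/acme/example.com/key.pem";
      extraOptions.ciphers = "HIGH+kEECDH:!aNULL";         # rendered verbatim into the Lua ssl table
    };
  };
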
diff --git a/nixos/modules/services/networking/quagga.nix b/nixos/modules/services/networking/quagga.nix
index aab58cc77b90..22204e53203c 100644
--- a/nixos/modules/services/networking/quagga.nix
+++ b/nixos/modules/services/networking/quagga.nix
@@ -133,7 +133,7 @@ in
     users.groups = {
       quagga = {};
       # Members of the quaggavty group can use vtysh to inspect the Quagga daemons
-      quaggavty = {};
+      quaggavty = { members = [ "quagga" ]; };
     };
 
     systemd.services =
diff --git a/nixos/modules/services/networking/radvd.nix b/nixos/modules/services/networking/radvd.nix
index 0199502163a3..85d7f9e4a41b 100644
--- a/nixos/modules/services/networking/radvd.nix
+++ b/nixos/modules/services/networking/radvd.nix
@@ -59,24 +59,11 @@ in
 
     systemd.services.radvd =
       { description = "IPv6 Router Advertisement Daemon";
-
         wantedBy = [ "multi-user.target" ];
-
         after = [ "network.target" ];
-
-        path = [ pkgs.radvd ];
-
-        preStart = ''
-          mkdir -m 755 -p /run/radvd
-          chown radvd /run/radvd
-        '';
-
         serviceConfig =
-          { ExecStart = "@${pkgs.radvd}/sbin/radvd radvd"
-              + " -p /run/radvd/radvd.pid -m syslog -u radvd -C ${confFile}";
+          { ExecStart = "@${pkgs.radvd}/bin/radvd radvd -n -u radvd -C ${confFile}";
             Restart = "always";
-            Type = "forking";
-            PIDFile = "/run/radvd/radvd.pid";
           };
       };
 
diff --git a/nixos/modules/services/networking/rdnssd.nix b/nixos/modules/services/networking/rdnssd.nix
index 95833d31e99d..a102242eae71 100644
--- a/nixos/modules/services/networking/rdnssd.nix
+++ b/nixos/modules/services/networking/rdnssd.nix
@@ -6,7 +6,7 @@
 with lib;
 let
   mergeHook = pkgs.writeScript "rdnssd-merge-hook" ''
-    #! ${pkgs.stdenv.shell} -e
+    #! ${pkgs.runtimeShell} -e
     ${pkgs.openresolv}/bin/resolvconf -u
   '';
 in
diff --git a/nixos/modules/services/networking/resilio.nix b/nixos/modules/services/networking/resilio.nix
index 6d2b7bdbca1b..2956a5ecbc04 100644
--- a/nixos/modules/services/networking/resilio.nix
+++ b/nixos/modules/services/networking/resilio.nix
@@ -17,7 +17,7 @@ let
 
     search_lan = entry.searchLAN;
     use_sync_trash = entry.useSyncTrash;
-    known_hosts = knownHosts;
+    known_hosts = entry.knownHosts;
   }) cfg.sharedFolders;
 
   configFile = pkgs.writeText "config.json" (builtins.toJSON ({
@@ -50,12 +50,7 @@ in
         description = ''
           If enabled, start the Resilio Sync daemon. Once enabled, you can
           interact with the service through the Web UI, or configure it in your
-          NixOS configuration. Enabling the <literal>resilio</literal> service
-          also installs a systemd user unit which can be used to start
-          user-specific copies of the daemon. Once installed, you can use
-          <literal>systemctl --user start resilio</literal> as your user to start
-          the daemon using the configuration file located at
-          <literal>$HOME/.config/resilio-sync/config.json</literal>.
+          NixOS configuration.
         '';
       };
 
diff --git a/nixos/modules/services/networking/rxe.nix b/nixos/modules/services/networking/rxe.nix
new file mode 100644
index 000000000000..a6a069ec50c0
--- /dev/null
+++ b/nixos/modules/services/networking/rxe.nix
@@ -0,0 +1,63 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.networking.rxe;
+
+  runRxeCmd = cmd: ifcs:
+    concatStrings ( map (x: "${pkgs.rdma-core}/bin/rxe_cfg -n ${cmd} ${x};") ifcs);
+
+  startScript = pkgs.writeShellScriptBin "rxe-start" ''
+    ${pkgs.rdma-core}/bin/rxe_cfg -n start
+    ${runRxeCmd "add" cfg.interfaces}
+    ${pkgs.rdma-core}/bin/rxe_cfg
+  '';
+
+  stopScript = pkgs.writeShellScriptBin "rxe-stop" ''
+    ${runRxeCmd "remove" cfg.interfaces }
+    ${pkgs.rdma-core}/bin/rxe_cfg -n stop
+  '';
+
+in {
+  ###### interface
+
+  options = {
+    networking.rxe = {
+      enable = mkEnableOption "RDMA over converged ethernet";
+      interfaces = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "eth0" ];
+        description = ''
+          Enable RDMA on the listed interfaces. The corresponding virtual
+          RDMA interfaces will be named rxe0 ... rxeN, in the order the
+          interfaces appear in the list. UDP port 4791 must be open on
+          the respective ethernet interfaces.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.rxe = {
+      path = with pkgs; [ kmod rdma-core ];
+      description = "RoCE interfaces";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-modules-load.service" "network-online.target" ];
+      wants = [ "network-pre.target" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = "${startScript}/bin/rxe-start";
+        ExecStop = "${stopScript}/bin/rxe-stop";
+      };
+    };
+  };
+}
+
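Minimal usage of the new rxe module:

  networking.rxe = {
    enable = true;
    interfaces = [ "eth0" ];   # creates rxe0 on top of eth0; UDP port 4791 must be open
  };
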
diff --git a/nixos/modules/services/networking/shadowsocks.nix b/nixos/modules/services/networking/shadowsocks.nix
new file mode 100644
index 000000000000..fe6d65a5f963
--- /dev/null
+++ b/nixos/modules/services/networking/shadowsocks.nix
@@ -0,0 +1,112 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.shadowsocks;
+
+  opts = {
+    server = cfg.localAddress;
+    server_port = cfg.port;
+    method = cfg.encryptionMethod;
+    mode = cfg.mode;
+    user = "nobody";
+    fast_open = true;
+  } // optionalAttrs (cfg.password != null) { password = cfg.password; };
+
+  configFile = pkgs.writeText "shadowsocks.json" (builtins.toJSON opts);
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.shadowsocks = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to run the shadowsocks-libev server.
+        '';
+      };
+
+      localAddress = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = ''
+          Local address to which the server binds.
+        '';
+      };
+
+      port = mkOption {
+        type = types.int;
+        default = 8388;
+        description = ''
+          Port which the server uses.
+        '';
+      };
+
+      password = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Password for connecting clients.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = ''
+          Password file with a password for connecting clients.
+        '';
+      };
+
+      mode = mkOption {
+        type = types.enum [ "tcp_only" "tcp_and_udp" "udp_only" ];
+        default = "tcp_and_udp";
+        description = ''
+          Relay protocols.
+        '';
+      };
+
+      encryptionMethod = mkOption {
+        type = types.str;
+        default = "chacha20-ietf-poly1305";
+        description = ''
+          Encryption method. See <link xlink:href="https://github.com/shadowsocks/shadowsocks-org/wiki/AEAD-Ciphers"/>.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = singleton
+      { assertion = cfg.password == null || cfg.passwordFile == null;
+        message = "Cannot use both password and passwordFile for shadowsocks-libev";
+      };
+
+    systemd.services.shadowsocks-libev = {
+      description = "shadowsocks-libev Daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.shadowsocks-libev ] ++ optional (cfg.passwordFile != null) pkgs.jq;
+      serviceConfig.PrivateTmp = true;
+      script = ''
+        ${optionalString (cfg.passwordFile != null) ''
+          cat ${configFile} | jq --arg password "$(cat "${cfg.passwordFile}")" '. + { password: $password }' > /tmp/shadowsocks.json
+        ''}
+        exec ss-server -c ${if cfg.passwordFile != null then "/tmp/shadowsocks.json" else configFile}
+      '';
+    };
+  };
+}
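A sketch of the new shadowsocks module using passwordFile so the secret stays out of the Nix store (the path is a placeholder):

  services.shadowsocks = {
    enable = true;
    port = 8388;
    mode = "tcp_and_udp";
    encryptionMethod = "chacha20-ietf-poly1305";
    passwordFile = "/run/keys/shadowsocks-password";   # placeholder path
  };
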
diff --git a/nixos/modules/services/networking/softether.nix b/nixos/modules/services/networking/softether.nix
index 9087b75c29c1..65df93a00da9 100644
--- a/nixos/modules/services/networking/softether.nix
+++ b/nixos/modules/services/networking/softether.nix
@@ -5,6 +5,8 @@ with lib;
 let
   cfg = config.services.softether;
 
+  package = cfg.package.override { dataDir = cfg.dataDir; };
+
 in
 {
 
@@ -49,7 +51,7 @@ in
 
       dataDir = mkOption {
         type = types.string;
-        default = "${cfg.package.dataDir}";
+        default = "/var/lib/softether";
         description = ''
           Data directory for SoftEther VPN.
         '';
@@ -64,11 +66,8 @@ in
   config = mkIf cfg.enable (
 
     mkMerge [{
-      environment.systemPackages = [
-          (pkgs.lib.overrideDerivation cfg.package (attrs: {
-            dataDir = cfg.dataDir;
-          }))
-        ];
+      environment.systemPackages = [ package ];
+
       systemd.services."softether-init" = {
         description = "SoftEther VPN services initial task";
         wantedBy = [ "network.target" ];
@@ -80,11 +79,11 @@ in
             for d in vpnserver vpnbridge vpnclient vpncmd; do
                 if ! test -e ${cfg.dataDir}/$d; then
                     ${pkgs.coreutils}/bin/mkdir -m0700 -p ${cfg.dataDir}/$d
-                    install -m0600 ${cfg.package}${cfg.dataDir}/$d/hamcore.se2 ${cfg.dataDir}/$d/hamcore.se2
+                    install -m0600 ${package}${cfg.dataDir}/$d/hamcore.se2 ${cfg.dataDir}/$d/hamcore.se2
                 fi
             done
             rm -rf ${cfg.dataDir}/vpncmd/vpncmd
-            ln -s ${cfg.package}${cfg.dataDir}/vpncmd/vpncmd ${cfg.dataDir}/vpncmd/vpncmd
+            ln -s ${package}${cfg.dataDir}/vpncmd/vpncmd ${cfg.dataDir}/vpncmd/vpncmd
         '';
       };
     }
@@ -97,12 +96,12 @@ in
         wantedBy = [ "network.target" ];
         serviceConfig = {
           Type = "forking";
-          ExecStart = "${cfg.package}/bin/vpnserver start";
-          ExecStop = "${cfg.package}/bin/vpnserver stop";
+          ExecStart = "${package}/bin/vpnserver start";
+          ExecStop = "${package}/bin/vpnserver stop";
         };
         preStart = ''
             rm -rf ${cfg.dataDir}/vpnserver/vpnserver
-            ln -s ${cfg.package}${cfg.dataDir}/vpnserver/vpnserver ${cfg.dataDir}/vpnserver/vpnserver
+            ln -s ${package}${cfg.dataDir}/vpnserver/vpnserver ${cfg.dataDir}/vpnserver/vpnserver
         '';
         postStop = ''
             rm -rf ${cfg.dataDir}/vpnserver/vpnserver
@@ -118,12 +117,12 @@ in
         wantedBy = [ "network.target" ];
         serviceConfig = {
           Type = "forking";
-          ExecStart = "${cfg.package}/bin/vpnbridge start";
-          ExecStop = "${cfg.package}/bin/vpnbridge stop";
+          ExecStart = "${package}/bin/vpnbridge start";
+          ExecStop = "${package}/bin/vpnbridge stop";
         };
         preStart = ''
             rm -rf ${cfg.dataDir}/vpnbridge/vpnbridge
-            ln -s ${cfg.package}${cfg.dataDir}/vpnbridge/vpnbridge ${cfg.dataDir}/vpnbridge/vpnbridge
+            ln -s ${package}${cfg.dataDir}/vpnbridge/vpnbridge ${cfg.dataDir}/vpnbridge/vpnbridge
         '';
         postStop = ''
             rm -rf ${cfg.dataDir}/vpnbridge/vpnbridge
@@ -139,12 +138,12 @@ in
         wantedBy = [ "network.target" ];
         serviceConfig = {
           Type = "forking";
-          ExecStart = "${cfg.package}/bin/vpnclient start";
-          ExecStop = "${cfg.package}/bin/vpnclient stop";
+          ExecStart = "${package}/bin/vpnclient start";
+          ExecStop = "${package}/bin/vpnclient stop";
         };
         preStart = ''
             rm -rf ${cfg.dataDir}/vpnclient/vpnclient
-            ln -s ${cfg.package}${cfg.dataDir}/vpnclient/vpnclient ${cfg.dataDir}/vpnclient/vpnclient
+            ln -s ${package}${cfg.dataDir}/vpnclient/vpnclient ${cfg.dataDir}/vpnclient/vpnclient
         '';
         postStart = ''
             sleep 1
diff --git a/nixos/modules/services/networking/ssh/sshd.nix b/nixos/modules/services/networking/ssh/sshd.nix
index 8828429a8178..aab1203086ce 100644
--- a/nixos/modules/services/networking/ssh/sshd.nix
+++ b/nixos/modules/services/networking/ssh/sshd.nix
@@ -21,7 +21,7 @@ let
          daemon reads in addition to the user's authorized_keys file.
           You can combine the <literal>keys</literal> and
           <literal>keyFiles</literal> options.
-          Warning: If you are using <literal>NixOps</literal> then don't use this 
+          Warning: If you are using <literal>NixOps</literal> then don't use this
           option since it will replace the key required for deployment via ssh.
         '';
       };
@@ -54,8 +54,6 @@ let
     ));
   in listToAttrs (map mkAuthKeyFile usersWithKeys);
 
-  supportOldHostKeys = !versionAtLeast config.system.stateVersion "15.07";
-
 in
 
 {
@@ -139,6 +137,14 @@ in
         '';
       };
 
+      openFirewall = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Whether to automatically open the specified ports in the firewall.
+        '';
+      };
+
       listenAddresses = mkOption {
         type = with types; listOf (submodule {
           options = {
@@ -191,9 +197,6 @@ in
         default =
           [ { type = "rsa"; bits = 4096; path = "/etc/ssh/ssh_host_rsa_key"; }
             { type = "ed25519"; path = "/etc/ssh/ssh_host_ed25519_key"; }
-          ] ++ optionals supportOldHostKeys
-          [ { type = "dsa"; path = "/etc/ssh/ssh_host_dsa_key"; }
-            { type = "ecdsa"; bits = 521; path = "/etc/ssh/ssh_host_ecdsa_key"; }
           ];
         description = ''
           NixOS can automatically generate SSH host keys.  This option
@@ -210,6 +213,65 @@ in
         description = "Files from which authorized keys are read.";
       };
 
+      kexAlgorithms = mkOption {
+        type = types.listOf types.str;
+        default = [
+          "curve25519-sha256@libssh.org"
+          "diffie-hellman-group-exchange-sha256"
+        ];
+        description = ''
+          Allowed key exchange algorithms
+          </para>
+          <para>
+          Defaults to recommended settings from both
+          <link xlink:href="https://stribika.github.io/2015/01/04/secure-secure-shell.html" />
+          and
+          <link xlink:href="https://wiki.mozilla.org/Security/Guidelines/OpenSSH#Modern_.28OpenSSH_6.7.2B.29" />
+        '';
+      };
+
+      ciphers = mkOption {
+        type = types.listOf types.str;
+        default = [
+          "chacha20-poly1305@openssh.com"
+          "aes256-gcm@openssh.com"
+          "aes128-gcm@openssh.com"
+          "aes256-ctr"
+          "aes192-ctr"
+          "aes128-ctr"
+        ];
+        description = ''
+          Allowed ciphers
+          </para>
+          <para>
+          Defaults to recommended settings from both
+          <link xlink:href="https://stribika.github.io/2015/01/04/secure-secure-shell.html" />
+          and
+          <link xlink:href="https://wiki.mozilla.org/Security/Guidelines/OpenSSH#Modern_.28OpenSSH_6.7.2B.29" />
+        '';
+      };
+
+      macs = mkOption {
+        type = types.listOf types.str;
+        default = [
+          "hmac-sha2-512-etm@openssh.com"
+          "hmac-sha2-256-etm@openssh.com"
+          "umac-128-etm@openssh.com"
+          "hmac-sha2-512"
+          "hmac-sha2-256"
+          "umac-128@openssh.com"
+        ];
+        description = ''
+          Allowed MACs
+          </para>
+          <para>
+          Defaults to recommended settings from both
+          <link xlink:href="https://stribika.github.io/2015/01/04/secure-secure-shell.html" />
+          and
+          <link xlink:href="https://wiki.mozilla.org/Security/Guidelines/OpenSSH#Modern_.28OpenSSH_6.7.2B.29" />
+        '';
+      };
+
       extraConfig = mkOption {
         type = types.lines;
         default = "";
@@ -253,13 +315,10 @@ in
       let
         service =
           { description = "SSH Daemon";
-
             wantedBy = optional (!cfg.startWhenNeeded) "multi-user.target";
-
+            after = [ "network.target" ];
             stopIfChanged = false;
-
             path = [ cfgc.package pkgs.gawk ];
-
             environment.LD_LIBRARY_PATH = nssModulesPath;
 
             preStart =
@@ -310,7 +369,7 @@ in
 
       };
 
-    networking.firewall.allowedTCPPorts = cfg.ports;
+    networking.firewall.allowedTCPPorts = if cfg.openFirewall then cfg.ports else [];
 
     security.pam.services.sshd =
       { startSession = true;
@@ -363,14 +422,13 @@ in
           HostKey ${k.path}
         '')}
 
-        # Allow DSA client keys for now. (These were deprecated
-        # in OpenSSH 7.0.)
-        PubkeyAcceptedKeyTypes +ssh-dss
+        KexAlgorithms ${concatStringsSep "," cfg.kexAlgorithms}
+        Ciphers ${concatStringsSep "," cfg.ciphers}
+        MACs ${concatStringsSep "," cfg.macs}
 
-        # Re-enable DSA host keys for now.
-        ${optionalString supportOldHostKeys ''
-          HostKeyAlgorithms +ssh-dss
-        ''}
+        # LogLevel VERBOSE logs user's key fingerprint on login.
+        # Needed to have a clear audit track of which key was used to log in.
+        LogLevel VERBOSE
       '';
 
     assertions = [{ assertion = if cfg.forwardX11 then cfgc.setXAuthLocation else true;
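The new hardening knobs in context. enable is assumed to be the module's pre-existing switch, and the lists below simply restate a subset of the defaults introduced above:

  services.openssh = {
    enable = true;
    openFirewall = false;   # manage the firewall rule manually instead
    kexAlgorithms = [ "curve25519-sha256@libssh.org" ];
    ciphers = [ "chacha20-poly1305@openssh.com" "aes256-gcm@openssh.com" ];
    macs = [ "hmac-sha2-512-etm@openssh.com" "hmac-sha2-256-etm@openssh.com" ];
  };
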
diff --git a/nixos/modules/services/networking/strongswan.nix b/nixos/modules/services/networking/strongswan.nix
index 3a3f64221c42..707d24b9220f 100644
--- a/nixos/modules/services/networking/strongswan.nix
+++ b/nixos/modules/services/networking/strongswan.nix
@@ -32,13 +32,13 @@ let
       ${caConf}
     '';
 
-  strongswanConf = {setup, connections, ca, secrets, managePlugins, enabledPlugins}: toFile "strongswan.conf" ''
+  strongswanConf = {setup, connections, ca, secretsFile, managePlugins, enabledPlugins}: toFile "strongswan.conf" ''
     charon {
       ${if managePlugins then "load_modular = no" else ""}
       ${if managePlugins then ("load = " + (concatStringsSep " " enabledPlugins)) else ""}
       plugins {
         stroke {
-          secrets_file = ${ipsecSecrets secrets}
+          secrets_file = ${secretsFile}
         }
       }
     }
@@ -135,7 +135,18 @@ in
     };
   };
 
-  config = with cfg; mkIf enable {
+
+  config = with cfg;
+  let
+    secretsFile = ipsecSecrets cfg.secrets;
+  in
+  mkIf enable
+    {
+
+    # here we should use the default strongswan ipsec.secrets and
+    # append to it (default one is empty so not a pb for now)
+    environment.etc."ipsec.secrets".source = secretsFile;
+
     systemd.services.strongswan = {
       description = "strongSwan IPSec Service";
       wantedBy = [ "multi-user.target" ];
@@ -143,11 +154,15 @@ in
       wants = [ "keys.target" ];
       after = [ "network-online.target" "keys.target" ];
       environment = {
-        STRONGSWAN_CONF = strongswanConf { inherit setup connections ca secrets managePlugins enabledPlugins; };
+        STRONGSWAN_CONF = strongswanConf { inherit setup connections ca secretsFile managePlugins enabledPlugins; };
       };
       serviceConfig = {
         ExecStart  = "${pkgs.strongswan}/sbin/ipsec start --nofork";
       };
+      preStart = ''
+        # with 'nopeerdns' setting, ppp writes into this folder
+        mkdir -m 700 -p /etc/ppp
+      '';
     };
   };
 }
diff --git a/nixos/modules/services/networking/stunnel.nix b/nixos/modules/services/networking/stunnel.nix
new file mode 100644
index 000000000000..89a14966eca7
--- /dev/null
+++ b/nixos/modules/services/networking/stunnel.nix
@@ -0,0 +1,221 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.stunnel;
+  yesNo = val: if val then "yes" else "no";
+
+  verifyChainPathAssert = n: c: {
+    assertion = c.verifyHostname == null || (c.verifyChain || c.verifyPeer);
+    message =  "stunnel: \"${n}\" client configuration - hostname verification " +
+      "is not possible without either verifyChain or verifyPeer enabled";
+  };
+
+  serverConfig = {
+    options = {
+      accept = mkOption {
+        type = types.int;
+        description = "On which port stunnel should listen for incoming TLS connections.";
+      };
+
+      connect = mkOption {
+        type = types.int;
+        description = "To which port the decrypted connection should be forwarded.";
+      };
+
+      cert = mkOption {
+        type = types.path;
+        description = "File containing both the private and public keys.";
+      };
+    };
+  };
+
+  clientConfig = {
+    options = {
+      accept = mkOption {
+        type = types.string;
+        description = "IP:Port on which connections should be accepted.";
+      };
+
+      connect = mkOption {
+        type = types.string;
+        description = "IP:Port destination to connect to.";
+      };
+
+      verifyChain = mkOption {
+        type = types.bool;
+        default = true;
+        description = "Check if the provided certificate has a valid certificate chain (against CAPath).";
+      };
+
+      verifyPeer = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Check if the provided certificate is contained in CAPath.";
+      };
+
+      CAPath = mkOption {
+        type = types.path;
+        default = "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt";
+        description = "Path to a file containing certificates to validate against.";
+      };
+
+      verifyHostname = mkOption {
+        type = with types; nullOr string;
+        default = null;
+        description = "If set, stunnel checks if the provided certificate is valid for the given hostname.";
+      };
+    };
+  };
+
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.stunnel = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable the stunnel TLS tunneling service.";
+      };
+
+      user = mkOption {
+        type = with types; nullOr string;
+        default = "nobody";
+        description = "The user under which stunnel runs.";
+      };
+
+      group = mkOption {
+        type = with types; nullOr string;
+        default = "nogroup";
+        description = "The group under which stunnel runs.";
+      };
+
+      logLevel = mkOption {
+        type = types.enum [ "emerg" "alert" "crit" "err" "warning" "notice" "info" "debug" ];
+        default = "info";
+        description = "Verbosity of stunnel output.";
+      };
+
+      fipsMode = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Enable FIPS 140-2 mode required for compliance.";
+      };
+
+      enableInsecureSSLv3 = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Enable support for the insecure SSLv3 protocol.";
+      };
+
+
+      servers = mkOption {
+        description = "Define the server configuations.";
+        type = with types; attrsOf (submodule serverConfig);
+        example = {
+          fancyWebserver = {
+            accept = 443;
+            connect = 8080;
+            cert = "/path/to/pem/file";
+          };
+        };
+        default = { };
+      };
+
+      clients = mkOption {
+        description = "Define the client configurations.";
+        type = with types; attrsOf (submodule clientConfig);
+        example = {
+          foobar = {
+            accept = "0.0.0.0:8080";
+            connect = "nixos.org:443";
+            verifyChain = false;
+          };
+        };
+        default = { };
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = concatLists [
+      (singleton {
+        assertion = (length (attrValues cfg.servers) != 0) || ((length (attrValues cfg.clients)) != 0);
+        message = "stunnel: At least one server- or client-configuration has to be present.";
+      })
+
+      (mapAttrsToList verifyChainPathAssert cfg.clients)
+    ];
+
+    environment.systemPackages = [ pkgs.stunnel ];
+
+    environment.etc."stunnel.cfg".text = ''
+      ${ if cfg.user != null then "setuid = ${cfg.user}" else "" }
+      ${ if cfg.group != null then "setgid = ${cfg.group}" else "" }
+
+      debug = ${cfg.logLevel}
+
+      ${ optionalString cfg.fipsMode "fips = yes" }
+      ${ optionalString cfg.enableInsecureSSLv3 "options = -NO_SSLv3" }
+
+      ; ----- SERVER CONFIGURATIONS -----
+      ${ lib.concatStringsSep "\n"
+           (lib.mapAttrsToList
+             (n: v: ''
+               [${n}]
+               accept = ${toString v.accept}
+               connect = ${toString v.connect}
+               cert = ${v.cert}
+
+             '')
+           cfg.servers)
+      }
+
+      ; ----- CLIENT CONFIGURATIONS -----
+      ${ lib.concatStringsSep "\n"
+           (lib.mapAttrsToList
+             (n: v: ''
+               [${n}]
+               client = yes
+               accept = ${v.accept}
+               connect = ${v.connect}
+               verifyChain = ${yesNo v.verifyChain}
+               verifyPeer = ${yesNo v.verifyPeer}
+               ${optionalString (v.CAPath != null) "CApath = ${v.CAPath}"}
+               ${optionalString (v.verifyHostname != null) "checkHost = ${v.verifyHostname}"}
+               OCSPaia = yes
+
+             '')
+           cfg.clients)
+      }
+    '';
+
+    systemd.services.stunnel = {
+      description = "stunnel TLS tunneling service";
+      after = [ "network.target" ];
+      wants = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ config.environment.etc."stunnel.cfg".source ];
+      serviceConfig = {
+        ExecStart = "${pkgs.stunnel}/bin/stunnel ${config.environment.etc."stunnel.cfg".source}";
+        Type = "forking";
+      };
+    };
+
+  };
+
+}
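A combined usage sketch for the new module, mirroring the examples embedded in the option declarations; note that setting verifyHostname only passes the assertion above when verifyChain or verifyPeer is enabled:

  {
    services.stunnel = {
      enable = true;
      servers.fancyWebserver = {
        accept  = 443;
        connect = 8080;
        cert    = "/path/to/pem/file";
      };
      clients.foobar = {
        accept         = "127.0.0.1:8080";
        connect        = "nixos.org:443";
        verifyChain    = true;
        verifyHostname = "nixos.org";  # requires verifyChain or verifyPeer
      };
    };
  }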
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index dcdc203bdc62..e485c073cbdd 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -16,12 +16,6 @@ in {
         available on http://127.0.0.1:8384/.
       '';
 
-      useInotify = mkOption {
-        type = types.bool;
-        default = false;
-        description = "Provide syncthing-inotify as a service.";
-      };
-
       systemService = mkOption {
         type = types.bool;
         default = true;
@@ -90,6 +84,12 @@ in {
     };
   };
 
+  imports = [
+    (mkRemovedOptionModule ["services" "syncthing" "useInotify"] ''
+      This option was removed because syncthing now includes the inotify functionality under the name "fswatcher".
+      It can be enabled on a per-folder basis through the web interface.
+    '')
+  ];
 
   ###### implementation
 
@@ -100,8 +100,7 @@ in {
       allowedUDPPorts = [ 21027 ];
     };
 
-    systemd.packages = [ pkgs.syncthing ]
-                       ++ lib.optional cfg.useInotify pkgs.syncthing-inotify;
+    systemd.packages = [ pkgs.syncthing ];
 
     users = mkIf (cfg.user == defaultUser) {
       extraUsers."${defaultUser}" =
@@ -125,7 +124,6 @@ in {
           STNOUPGRADE = "yes";
           inherit (cfg) all_proxy;
         } // config.networking.proxy.envVars;
-        wants = mkIf cfg.useInotify [ "syncthing-inotify.service" ];
         wantedBy = [ "multi-user.target" ];
         serviceConfig = {
           Restart = "on-failure";
@@ -141,20 +139,6 @@ in {
       syncthing-resume = {
         wantedBy = [ "suspend.target" ];
       };
-
-      syncthing-inotify = mkIf (cfg.systemService && cfg.useInotify) {
-        description = "Syncthing Inotify File Watcher service";
-        after = [ "network.target" "syncthing.service" ];
-        requires = [ "syncthing.service" ];
-        wantedBy = [ "multi-user.target" ];
-        serviceConfig = {
-          SuccessExitStatus = "2";
-          RestartForceExitStatus = "3";
-          Restart = "on-failure";
-          User = cfg.user;
-          ExecStart = "${pkgs.syncthing-inotify.bin}/bin/syncthing-inotify -home=${cfg.dataDir} -logflags=0";
-        };
-      };
     };
   };
 }
diff --git a/nixos/modules/services/networking/tcpcrypt.nix b/nixos/modules/services/networking/tcpcrypt.nix
index 2f304165eb4b..ee005e11aa32 100644
--- a/nixos/modules/services/networking/tcpcrypt.nix
+++ b/nixos/modules/services/networking/tcpcrypt.nix
@@ -44,9 +44,9 @@ in
       path = [ pkgs.iptables pkgs.tcpcrypt pkgs.procps ];
 
       preStart = ''
-        mkdir -p /var/run/tcpcryptd
-        chown tcpcryptd /var/run/tcpcryptd
-        sysctl -n net.ipv4.tcp_ecn >/run/pre-tcpcrypt-ecn-state
+        mkdir -p /run/tcpcryptd
+        chown tcpcryptd /run/tcpcryptd
+        sysctl -n net.ipv4.tcp_ecn > /run/tcpcryptd/pre-tcpcrypt-ecn-state
         sysctl -w net.ipv4.tcp_ecn=0
 
         iptables -t raw -N nixos-tcpcrypt
@@ -61,8 +61,8 @@ in
       script = "tcpcryptd -x 0x10";
 
       postStop = ''
-        if [ -f /run/pre-tcpcrypt-ecn-state ]; then
-          sysctl -w net.ipv4.tcp_ecn=$(cat /run/pre-tcpcrypt-ecn-state)
+        if [ -f /run/tcpcryptd/pre-tcpcrypt-ecn-state ]; then
+          sysctl -w net.ipv4.tcp_ecn=$(cat /run/tcpcryptd/pre-tcpcrypt-ecn-state)
         fi
 
         iptables -t mangle -D POSTROUTING -j nixos-tcpcrypt || true
diff --git a/nixos/modules/services/networking/tinc.nix b/nixos/modules/services/networking/tinc.nix
index adb80ea29840..e3c9b5282b8c 100644
--- a/nixos/modules/services/networking/tinc.nix
+++ b/nixos/modules/services/networking/tinc.nix
@@ -178,6 +178,8 @@ in
         preStart = ''
           mkdir -p /etc/tinc/${network}/hosts
           chown tinc.${network} /etc/tinc/${network}/hosts
+          mkdir -p /etc/tinc/${network}/invitations
+          chown tinc.${network} /etc/tinc/${network}/invitations
 
           # Determine how we should generate our keys
           if type tinc >/dev/null 2>&1; then
diff --git a/nixos/modules/services/networking/tox-bootstrapd.nix b/nixos/modules/services/networking/tox-bootstrapd.nix
index c1f945773e23..cb0e6b158651 100644
--- a/nixos/modules/services/networking/tox-bootstrapd.nix
+++ b/nixos/modules/services/networking/tox-bootstrapd.nix
@@ -69,7 +69,7 @@ in
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig =
-        { ExecStart = "${pkg}/bin/tox-bootstrapd ${cfgFile}";
+        { ExecStart = "${pkg}/bin/tox-bootstrapd --config=${cfgFile}";
           Type = "forking";
           inherit PIDFile;
           User = "tox-bootstrapd";
diff --git a/nixos/modules/services/networking/unbound.nix b/nixos/modules/services/networking/unbound.nix
index 545ee327d596..f069a9883a7f 100644
--- a/nixos/modules/services/networking/unbound.nix
+++ b/nixos/modules/services/networking/unbound.nix
@@ -112,7 +112,7 @@ in
         mkdir -m 0755 -p ${stateDir}/dev/
         cp ${confFile} ${stateDir}/unbound.conf
         ${optionalString cfg.enableRootTrustAnchor ''
-        ${pkgs.unbound}/bin/unbound-anchor -a ${rootTrustAnchorFile}
+        ${pkgs.unbound}/bin/unbound-anchor -a ${rootTrustAnchorFile} || echo "Root anchor updated!"
         chown unbound ${stateDir} ${rootTrustAnchorFile}
         ''}
         touch ${stateDir}/dev/random
diff --git a/nixos/modules/services/networking/zerotierone.nix b/nixos/modules/services/networking/zerotierone.nix
index 86e0204ec2f7..cd1617b8e2ba 100644
--- a/nixos/modules/services/networking/zerotierone.nix
+++ b/nixos/modules/services/networking/zerotierone.nix
@@ -7,6 +7,16 @@ let
 in
 {
   options.services.zerotierone.enable = mkEnableOption "ZeroTierOne";
+
+  options.services.zerotierone.joinNetworks = mkOption {
+    default = [];
+    example = [ "a8a2c3c10c1a68de" ];
+    type = types.listOf types.str;
+    description = ''
+      List of ZeroTier Network IDs to join on startup
+    '';
+  };
+
   options.services.zerotierone.package = mkOption {
     default = pkgs.zerotierone;
     defaultText = "pkgs.zerotierone";
@@ -22,12 +32,13 @@ in
       path = [ cfg.package ];
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      preStart =
-        ''
-        mkdir -p /var/lib/zerotier-one
+      preStart = ''
+        mkdir -p /var/lib/zerotier-one/networks.d
         chmod 700 /var/lib/zerotier-one
         chown -R root:root /var/lib/zerotier-one
-        '';
+      '' + (concatMapStrings (netId: ''
+        touch "/var/lib/zerotier-one/networks.d/${netId}.conf"
+      '') cfg.joinNetworks);
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/zerotier-one";
         Restart = "always";
@@ -38,6 +49,9 @@ in
     # ZeroTier does not issue DHCP leases, but some strangers might...
     networking.dhcpcd.denyInterfaces = [ "zt0" ];
 
+    # ZeroTier receives UDP transmissions on port 9993 by default
+    networking.firewall.allowedUDPPorts = [ 9993 ];
+
     environment.systemPackages = [ cfg.package ];
   };
 }
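A short sketch of the new joinNetworks option; each listed ID results in a networks.d/&lt;id&gt;.conf file so the daemon joins that network on startup (the ID is the placeholder from the example above):

  {
    services.zerotierone = {
      enable = true;
      joinNetworks = [ "a8a2c3c10c1a68de" ];
    };
  }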
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index 4c7f58d1d8bc..ecab8cfc7df9 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -124,7 +124,7 @@ in
 
       listenAddresses = mkOption {
         type = types.listOf types.str;
-        default = [ "127.0.0.1:631" ];
+        default = [ "localhost:631" ];
         example = [ "*:631" ];
         description = ''
           A list of addresses and ports on which to listen.
@@ -321,7 +321,10 @@ in
             ''}
           '';
 
-          serviceConfig.PrivateTmp = true;
+          serviceConfig = {
+            PrivateTmp = true;
+            RuntimeDirectory = [ "cups" ];
+          };
       };
 
     systemd.services.cups-browsed = mkIf avahiEnabled
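Since the default listen address is now localhost:631 instead of 127.0.0.1:631, exposing cupsd on all interfaces has to be requested explicitly; a sketch assuming the option lives under services.printing, as the file path suggests:

  {
    services.printing = {
      enable = true;
      listenAddresses = [ "*:631" ];  # default is [ "localhost:631" ]
    };
  }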
diff --git a/nixos/modules/services/scheduling/fcron.nix b/nixos/modules/services/scheduling/fcron.nix
index ac589be57736..e3b6b638f5a7 100644
--- a/nixos/modules/services/scheduling/fcron.nix
+++ b/nixos/modules/services/scheduling/fcron.nix
@@ -90,16 +90,24 @@ in
       [ (allowdeny "allow" (cfg.allow))
         (allowdeny "deny" cfg.deny)
         # see man 5 fcron.conf
-        { source = pkgs.writeText "fcron.conf" ''
-            fcrontabs   =       /var/spool/fcron
-            pidfile     =       /var/run/fcron.pid
-            fifofile    =       /var/run/fcron.fifo
-            fcronallow  =       /etc/fcron.allow
-            fcrondeny   =       /etc/fcron.deny
-            shell       =       /bin/sh
-            sendmail    =       /run/wrappers/bin/sendmail
-            editor      =       ${pkgs.vim}/bin/vim
-          '';
+        { source =
+            let
+              isSendmailWrapped =
+                lib.hasAttr "sendmail" config.security.wrappers;
+              sendmailPath =
+                if isSendmailWrapped then "/run/wrappers/bin/sendmail"
+                else "${config.system.path}/bin/sendmail";
+            in
+            pkgs.writeText "fcron.conf" ''
+              fcrontabs   =       /var/spool/fcron
+              pidfile     =       /var/run/fcron.pid
+              fifofile    =       /var/run/fcron.fifo
+              fcronallow  =       /etc/fcron.allow
+              fcrondeny   =       /etc/fcron.deny
+              shell       =       /bin/sh
+              sendmail    =       ${sendmailPath}
+              editor      =       ${pkgs.vim}/bin/vim
+            '';
           target = "fcron.conf";
           gid = config.ids.gids.fcron;
           mode = "0644";
diff --git a/nixos/modules/services/search/elasticsearch.nix b/nixos/modules/services/search/elasticsearch.nix
index c51dd5d94655..d61f588205af 100644
--- a/nixos/modules/services/search/elasticsearch.nix
+++ b/nixos/modules/services/search/elasticsearch.nix
@@ -6,6 +6,7 @@ let
   cfg = config.services.elasticsearch;
 
   es5 = builtins.compareVersions (builtins.parseDrvName cfg.package.name).version "5" >= 0;
+  es6 = builtins.compareVersions (builtins.parseDrvName cfg.package.name).version "6" >= 0;
 
   esConfig = ''
     network.host: ${cfg.listenAddress}
@@ -31,8 +32,11 @@ let
       (if es5 then (pkgs.writeTextDir "log4j2.properties" cfg.logging)
               else (pkgs.writeTextDir "logging.yml" cfg.logging))
     ];
-    # Elasticsearch 5.x won't start when the scripts directory does not exist
-    postBuild = if es5 then "${pkgs.coreutils}/bin/mkdir -p $out/scripts" else "";
+    postBuild = concatStringsSep "\n" (concatLists [
+      # Elasticsearch 5.x won't start when the scripts directory does not exist
+      (optional es5 "${pkgs.coreutils}/bin/mkdir -p $out/scripts")
+      (optional es6 "ln -s ${cfg.package}/config/jvm.options $out/jvm.options")
+    ]);
   };
 
   esPlugins = pkgs.buildEnv {
@@ -92,8 +96,6 @@ in {
         node.name: "elasticsearch"
         node.master: true
         node.data: false
-        index.number_of_shards: 5
-        index.number_of_replicas: 1
       '';
     };
 
@@ -165,7 +167,10 @@ in {
       path = [ pkgs.inetutils ];
       environment = {
         ES_HOME = cfg.dataDir;
-        ES_JAVA_OPTS = toString ([ "-Des.path.conf=${configDir}" ] ++ cfg.extraJavaOptions);
+        ES_JAVA_OPTS = toString ( optional (!es6) [ "-Des.path.conf=${configDir}" ]
+                                  ++ cfg.extraJavaOptions);
+      } // optionalAttrs es6 {
+        ES_PATH_CONF = configDir;
       };
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/elasticsearch ${toString cfg.extraCmdLineOptions}";
diff --git a/nixos/modules/services/security/clamav.nix b/nixos/modules/services/security/clamav.nix
index f71f30fee97a..4161c61ed375 100644
--- a/nixos/modules/services/security/clamav.nix
+++ b/nixos/modules/services/security/clamav.nix
@@ -76,8 +76,9 @@ in
     };
   };
 
-  config = mkIf cfg.updater.enable or cfg.daemon.enable {
+  config = mkIf (cfg.updater.enable || cfg.daemon.enable) {
     environment.systemPackages = [ pkg ];
+
     users.extraUsers = singleton {
       name = clamavUser;
       uid = config.ids.uids.clamav;
@@ -94,10 +95,10 @@ in
     environment.etc."clamav/freshclam.conf".source = freshclamConfigFile;
     environment.etc."clamav/clamd.conf".source = clamdConfigFile;
 
-    systemd.services.clamav-daemon = mkIf cfg.daemon.enable {
+    systemd.services.clamav-daemon = optionalAttrs cfg.daemon.enable {
       description = "ClamAV daemon (clamd)";
-      after = mkIf cfg.updater.enable [ "clamav-freshclam.service" ];
-      requires = mkIf cfg.updater.enable [ "clamav-freshclam.service" ];
+      after = optional cfg.updater.enable "clamav-freshclam.service";
+      requires = optional cfg.updater.enable "clamav-freshclam.service";
       wantedBy = [ "multi-user.target" ];
       restartTriggers = [ clamdConfigFile ];
 
@@ -115,7 +116,7 @@ in
       };
     };
 
-    systemd.timers.clamav-freshclam = mkIf cfg.updater.enable {
+    systemd.timers.clamav-freshclam = optionalAttrs cfg.updater.enable {
       description = "Timer for ClamAV virus database updater (freshclam)";
       wantedBy = [ "timers.target" ];
       timerConfig = {
@@ -124,7 +125,7 @@ in
       };
     };
 
-    systemd.services.clamav-freshclam = mkIf cfg.updater.enable {
+    systemd.services.clamav-freshclam = optionalAttrs cfg.updater.enable {
       description = "ClamAV virus database updater (freshclam)";
       restartTriggers = [ freshclamConfigFile ];
 
diff --git a/nixos/modules/services/security/hologram-agent.nix b/nixos/modules/services/security/hologram-agent.nix
index 6c53a2df6306..39ed506f7617 100644
--- a/nixos/modules/services/security/hologram-agent.nix
+++ b/nixos/modules/services/security/hologram-agent.nix
@@ -35,10 +35,9 @@ in {
   config = mkIf cfg.enable {
     boot.kernelModules = [ "dummy" ];
 
-    networking.interfaces.dummy0 = {
-      ipAddress = "169.254.169.254";
-      prefixLength = 32;
-    };
+    networking.interfaces.dummy0.ipv4.addresses = [
+      { address = "169.254.169.254"; prefixLength = 32; }
+    ];
 
     systemd.services.hologram-agent = {
       description = "Provide EC2 instance credentials to machines outside of EC2";
diff --git a/nixos/modules/services/security/hologram-server.nix b/nixos/modules/services/security/hologram-server.nix
index e267fed27955..bad02c7440ba 100644
--- a/nixos/modules/services/security/hologram-server.nix
+++ b/nixos/modules/services/security/hologram-server.nix
@@ -12,16 +12,20 @@ let
         dn       = cfg.ldapBindDN;
         password = cfg.ldapBindPassword;
       };
-      insecureldap = cfg.ldapInsecure;
-      userattr     = cfg.ldapUserAttr;
-      baseDN       = cfg.ldapBaseDN;
+      insecureldap    = cfg.ldapInsecure;
+      userattr        = cfg.ldapUserAttr;
+      baseDN          = cfg.ldapBaseDN;
+      enableldapRoles = cfg.enableLdapRoles;
+      roleAttr        = cfg.roleAttr;
+      groupClassAttr  = cfg.groupClassAttr;
     };
     aws = {
       account     = cfg.awsAccount;
       defaultrole = cfg.awsDefaultRole;
     };
-    stats  = cfg.statsAddress;
-    listen = cfg.listenAddress;
+    stats        = cfg.statsAddress;
+    listen       = cfg.listenAddress;
+    cachetimeout = cfg.cacheTimeoutSeconds;
   });
 in {
   options = {
@@ -70,6 +74,24 @@ in {
         description = "Password of account to use to query the LDAP server";
       };
 
+      enableLdapRoles = mkOption {
+        type        = types.bool;
+        default     = false;
+        description = "Whether to assign user roles based on the user's LDAP group memberships";
+      };
+
+      groupClassAttr = mkOption {
+        type = types.str;
+        default = "groupOfNames";
+        description = "The objectclass attribute to search for groups when enableLdapRoles is true";
+      };
+
+      roleAttr = mkOption {
+        type        = types.str;
+        default     = "businessCategory";
+        description = "Which LDAP group attribute to search for authorized role ARNs";
+      };
+
       awsAccount = mkOption {
         type        = types.str;
         description = "AWS account number";
@@ -85,6 +107,12 @@ in {
         default     = "";
         description = "Address of statsd server";
       };
+
+      cacheTimeoutSeconds = mkOption {
+        type        = types.int;
+        default     = 3600;
+        description = "How often (in seconds) to refresh the LDAP cache";
+      };
     };
   };
 
diff --git a/nixos/modules/services/security/physlock.nix b/nixos/modules/services/security/physlock.nix
index 30224d7fc6ba..97fbd6aae6e0 100644
--- a/nixos/modules/services/security/physlock.nix
+++ b/nixos/modules/services/security/physlock.nix
@@ -30,6 +30,20 @@ in
         '';
       };
 
+      allowAnyUser = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to allow any user to lock the screen. This will install a
+          setuid wrapper that lets any user start physlock as root, which is
+          a minor security risk. To use it, call the physlock binary directly
+          instead of the systemd service.
+
+          Note that you might need to log out and back in for the wrapped
+          binary to appear in your PATH after changing this option.
+        '';
+      };
+
       disableSysRq = mkOption {
         type = types.bool;
         default = true;
@@ -79,28 +93,36 @@ in
 
   ###### implementation
 
-  config = mkIf cfg.enable {
-
-    # for physlock -l and physlock -L
-    environment.systemPackages = [ pkgs.physlock ];
-
-    systemd.services."physlock" = {
-      enable = true;
-      description = "Physlock";
-      wantedBy = optional cfg.lockOn.suspend   "suspend.target"
-              ++ optional cfg.lockOn.hibernate "hibernate.target"
-              ++ cfg.lockOn.extraTargets;
-      before   = optional cfg.lockOn.suspend   "systemd-suspend.service"
-              ++ optional cfg.lockOn.hibernate "systemd-hibernate.service"
-              ++ cfg.lockOn.extraTargets;
-      serviceConfig.Type = "forking";
-      script = ''
-        ${pkgs.physlock}/bin/physlock -d${optionalString cfg.disableSysRq "s"}
-      '';
-    };
+  config = mkIf cfg.enable (mkMerge [
+    {
+
+      # for physlock -l and physlock -L
+      environment.systemPackages = [ pkgs.physlock ];
+
+      systemd.services."physlock" = {
+        enable = true;
+        description = "Physlock";
+        wantedBy = optional cfg.lockOn.suspend   "suspend.target"
+                ++ optional cfg.lockOn.hibernate "hibernate.target"
+                ++ cfg.lockOn.extraTargets;
+        before   = optional cfg.lockOn.suspend   "systemd-suspend.service"
+                ++ optional cfg.lockOn.hibernate "systemd-hibernate.service"
+                ++ cfg.lockOn.extraTargets;
+        serviceConfig = {
+          Type = "forking";
+          ExecStart = "${pkgs.physlock}/bin/physlock -d${optionalString cfg.disableSysRq "s"}";
+        };
+      };
 
-    security.pam.services.physlock = {};
+      security.pam.services.physlock = {};
 
-  };
+    }
+
+    (mkIf cfg.allowAnyUser {
+
+      security.wrappers.physlock = { source = "${pkgs.physlock}/bin/physlock"; user = "root"; };
+
+    })
+  ]);
 
 }
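A sketch combining the existing lockOn options with the new allowAnyUser switch (option paths follow the cfg.* references above):

  {
    services.physlock = {
      enable = true;
      lockOn.suspend = true;
      # installs a setuid wrapper so any user can run physlock directly
      allowAnyUser = true;
    };
  }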
diff --git a/nixos/modules/services/security/tor.nix b/nixos/modules/services/security/tor.nix
index bc79d9f2a590..806252f49b8d 100644
--- a/nixos/modules/services/security/tor.nix
+++ b/nixos/modules/services/security/tor.nix
@@ -5,10 +5,31 @@ with lib;
 let
   cfg = config.services.tor;
   torDirectory = "/var/lib/tor";
+  torRunDirectory = "/run/tor";
 
   opt    = name: value: optionalString (value != null) "${name} ${value}";
   optint = name: value: optionalString (value != null && value != 0)    "${name} ${toString value}";
 
+  isolationOptions = {
+    type = types.listOf (types.enum [
+      "IsolateClientAddr"
+      "IsolateSOCKSAuth"
+      "IsolateClientProtocol"
+      "IsolateDestPort"
+      "IsolateDestAddr"
+    ]);
+    default = [];
+    example = [
+      "IsolateClientAddr"
+      "IsolateSOCKSAuth"
+      "IsolateClientProtocol"
+      "IsolateDestPort"
+      "IsolateDestAddr"
+    ];
+    description = "Tor isolation options";
+  };
+
+
   torRc = ''
     User tor
     DataDirectory ${torDirectory}
@@ -18,12 +39,23 @@ let
     ''}
 
     ${optint "ControlPort" cfg.controlPort}
+    ${optionalString cfg.controlSocket.enable "ControlSocket ${torRunDirectory}/control GroupWritable RelaxDirModeCheck"}
   ''
   # Client connection config
-  + optionalString cfg.client.enable  ''
-    SOCKSPort ${cfg.client.socksListenAddress} IsolateDestAddr
+  + optionalString cfg.client.enable ''
+    SOCKSPort ${cfg.client.socksListenAddress} ${toString cfg.client.socksIsolationOptions}
     SOCKSPort ${cfg.client.socksListenAddressFaster}
     ${opt "SocksPolicy" cfg.client.socksPolicy}
+
+    ${optionalString cfg.client.transparentProxy.enable ''
+    TransPort ${cfg.client.transparentProxy.listenAddress} ${toString cfg.client.transparentProxy.isolationOptions}
+    ''}
+
+    ${optionalString cfg.client.dns.enable ''
+    DNSPort ${cfg.client.dns.listenAddress} ${toString cfg.client.dns.isolationOptions}
+    AutomapHostsOnResolve 1
+    AutomapHostsSuffixes ${concatStringsSep "," cfg.client.dns.automapHostsSuffixes}
+    ''}
   ''
   # Relay config
   + optionalString cfg.relay.enable ''
@@ -58,6 +90,9 @@ let
     ${flip concatMapStrings v.map (p: ''
       HiddenServicePort ${toString p.port} ${p.destination}
     '')}
+    ${optionalString (v.authorizeClient != null) ''
+      HiddenServiceAuthorizeClient ${v.authorizeClient.authType} ${concatStringsSep "," v.authorizeClient.clientNames}
+    ''}
   ''))
   + cfg.extraConfig;
 
@@ -107,6 +142,17 @@ in
         '';
       };
 
+      controlSocket = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = ''
+            Whether to enable the Tor control socket. The control socket is
+            created in <literal>${torRunDirectory}/control</literal>.
+          '';
+        };
+      };
+
       client = {
         enable = mkOption {
           type = types.bool;
@@ -154,6 +200,55 @@ in
           '';
         };
 
+        socksIsolationOptions = mkOption (isolationOptions // {
+          default = ["IsolateDestAddr"];
+        });
+
+        transparentProxy = {
+          enable = mkOption {
+            type = types.bool;
+            default = false;
+            description = "Whether to enable tor transaprent proxy";
+          };
+
+          listenAddress = mkOption {
+            type = types.str;
+            default = "127.0.0.1:9040";
+            example = "192.168.0.1:9040";
+            description = ''
+              Bind transparent proxy to this address.
+            '';
+          };
+
+          isolationOptions = mkOption isolationOptions;
+        };
+
+        dns = {
+          enable = mkOption {
+            type = types.bool;
+            default = false;
+            description = "Whether to enable tor dns resolver";
+          };
+
+          listenAddress = mkOption {
+            type = types.str;
+            default = "127.0.0.1:9053";
+            example = "192.168.0.1:9053";
+            description = ''
+              Bind tor dns to this address.
+            '';
+          };
+
+          isolationOptions = mkOption isolationOptions;
+
+          automapHostsSuffixes = mkOption {
+            type = types.listOf types.str;
+            default = [".onion" ".exit"];
+            example = [".onion"];
+            description = "List of suffixes to use with automapHostsOnResolve";
+          };
+        };
+
         privoxy.enable = mkOption {
           type = types.bool;
           default = true;
@@ -540,6 +635,33 @@ in
                }));
              };
 
+             authorizeClient = mkOption {
+               default = null;
+               description = "If configured, the hidden service is accessible for authorized clients only.";
+               type = types.nullOr (types.submodule ({config, ...}: {
+
+                 options = {
+
+                   authType = mkOption {
+                     type = types.enum [ "basic" "stealth" ];
+                     description = ''
+                       Either <literal>"basic"</literal> for a general-purpose authorization protocol
+                       or <literal>"stealth"</literal> for a less scalable protocol
+                       that also hides service activity from unauthorized clients.
+                     '';
+                   };
+
+                   clientNames = mkOption {
+                     type = types.nonEmptyListOf (types.strMatching "[A-Za-z0-9+-_]+");
+                     description = ''
+                       Only clients that are listed here are authorized to access the hidden service.
+                       Generated authorization data can be found in <filename>${torDirectory}/onion/$name/hostname</filename>.
+                       Clients need to put this authorization data in their configuration file using <literal>HidServAuth</literal>.
+                     '';
+                   };
+                 };
+               }));
+             };
           };
 
           config = {
@@ -581,14 +703,10 @@ in
         after    = [ "network.target" ];
         restartTriggers = [ torRcFile ];
 
-        # Translated from the upstream contrib/dist/tor.service.in
-        preStart = ''
-          install -o tor -g tor -d ${torDirectory}/onion
-          ${pkgs.tor}/bin/tor -f ${torRcFile} --verify-config
-        '';
-
         serviceConfig =
           { Type         = "simple";
+            # Translated from the upstream contrib/dist/tor.service.in
+            ExecStartPre = "${pkgs.tor}/bin/tor -f ${torRcFile} --verify-config";
             ExecStart    = "${pkgs.tor}/bin/tor -f ${torRcFile} --RunAsDaemon 0";
             ExecReload   = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
             KillSignal   = "SIGINT";
@@ -603,11 +721,13 @@ in
             #   DeviceAllow /dev/urandom r
             # .. but we can't specify DeviceAllow multiple times. 'closed'
             # is close enough.
+            RuntimeDirectory        = "tor";
+            StateDirectory          = [ "tor" "tor/onion" ];
             PrivateTmp              = "yes";
             DevicePolicy            = "closed";
             InaccessibleDirectories = "/home";
             ReadOnlyDirectories     = "/";
-            ReadWriteDirectories    = torDirectory;
+            ReadWriteDirectories    = [torDirectory torRunDirectory];
             NoNewPrivileges         = "yes";
           };
       };
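A client-side sketch exercising the new options (isolation flags, transparent proxy, DNS port and control socket); the addresses are the defaults from the option declarations above:

  {
    services.tor = {
      enable = true;
      controlSocket.enable = true;
      client = {
        enable = true;
        socksIsolationOptions = [ "IsolateDestAddr" "IsolateDestPort" ];
        transparentProxy = {
          enable = true;
          listenAddress = "127.0.0.1:9040";
        };
        dns = {
          enable = true;
          listenAddress = "127.0.0.1:9053";
          automapHostsSuffixes = [ ".onion" ".exit" ];
        };
      };
    };
  }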
diff --git a/nixos/modules/services/security/torify.nix b/nixos/modules/services/security/torify.nix
index a29cb3f33dae..08da726437ea 100644
--- a/nixos/modules/services/security/torify.nix
+++ b/nixos/modules/services/security/torify.nix
@@ -7,7 +7,7 @@ let
   torify = pkgs.writeTextFile {
     name = "tsocks";
     text = ''
-        #!${pkgs.stdenv.shell}
+        #!${pkgs.runtimeShell}
         TSOCKS_CONF_FILE=${pkgs.writeText "tsocks.conf" cfg.tsocks.config} LD_PRELOAD="${pkgs.tsocks}/lib/libtsocks.so $LD_PRELOAD" "$@"
     '';
     executable = true;
diff --git a/nixos/modules/services/security/torsocks.nix b/nixos/modules/services/security/torsocks.nix
index 1b5a05b21e77..c60c745443bc 100644
--- a/nixos/modules/services/security/torsocks.nix
+++ b/nixos/modules/services/security/torsocks.nix
@@ -23,7 +23,7 @@ let
   wrapTorsocks = name: server: pkgs.writeTextFile {
     name = name;
     text = ''
-        #!${pkgs.stdenv.shell}
+        #!${pkgs.runtimeShell}
         TORSOCKS_CONF_FILE=${pkgs.writeText "torsocks.conf" (configFile server)} ${pkgs.torsocks}/bin/torsocks "$@"
     '';
     executable = true;
diff --git a/nixos/modules/services/security/usbguard.nix b/nixos/modules/services/security/usbguard.nix
index 1f2c56a9efa1..5d469cabe2cb 100644
--- a/nixos/modules/services/security/usbguard.nix
+++ b/nixos/modules/services/security/usbguard.nix
@@ -56,7 +56,7 @@ in {
       };
 
       rules = mkOption {
-        type = types.nullOr types.str;
+        type = types.nullOr types.lines;
         default = null;
         example = ''
           allow with-interface equals { 08:*:* }
@@ -192,7 +192,7 @@ in {
 
       serviceConfig = {
         Type = "simple";
-        ExecStart = ''${pkgs.usbguard}/bin/usbguard-daemon -d -k -c ${daemonConfFile}'';
+        ExecStart = ''${pkgs.usbguard}/bin/usbguard-daemon -P -d -k -c ${daemonConfFile}'';
         Restart = "on-failure";
       };
     };
diff --git a/nixos/modules/services/system/localtime.nix b/nixos/modules/services/system/localtime.nix
new file mode 100644
index 000000000000..b9355bbb9441
--- /dev/null
+++ b/nixos/modules/services/system/localtime.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.localtime;
+in {
+  options = {
+    services.localtime = {
+      enable = mkOption {
+        default = false;
+        description = ''
+          Enable <literal>localtime</literal>, a simple daemon for keeping the
+          system timezone up-to-date based on the current location. It uses
+          geoclue2 to determine the current location and systemd-timedated to
+          actually set the timezone.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.geoclue2.enable = true;
+
+    security.polkit.extraConfig = ''
+     polkit.addRule(function(action, subject) {
+       if (action.id == "org.freedesktop.timedate1.set-timezone"
+           && subject.user == "localtimed") {
+         return polkit.Result.YES;
+       }
+     });
+    '';
+
+    users.users = [{
+      name = "localtimed";
+      description = "Taskserver user";
+    }];
+
+    systemd.services.localtime = {
+      description = "localtime service";
+      wantedBy = [ "multi-user.target" ];
+      partOf = [ "geoclue.service "];
+
+      serviceConfig = {
+        Restart                 = "on-failure";
+        # TODO: make it work with dbus
+        #DynamicUser             = true;
+        Nice                    = 10;
+        User                    = "localtimed";
+        PrivateTmp              = "yes";
+        PrivateDevices          = true;
+        PrivateNetwork          = "yes";
+        NoNewPrivileges         = "yes";
+        ProtectSystem           = "strict";
+        ProtectHome             = true;
+        ExecStart               = "${pkgs.localtime}/bin/localtimed";
+      };
+    };
+  };
+}
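Enabling the new module is a one-line change; it pulls in geoclue2 and the polkit rule shown above:

  {
    services.localtime.enable = true;
  }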
diff --git a/nixos/modules/services/torrent/transmission.nix b/nixos/modules/services/torrent/transmission.nix
index dd6b585b7e23..3564afd77f41 100644
--- a/nixos/modules/services/torrent/transmission.nix
+++ b/nixos/modules/services/torrent/transmission.nix
@@ -21,6 +21,19 @@ let
 
   # for users in group "transmission" to have access to torrents
   fullSettings = { umask = 2; download-dir = downloadDir; incomplete-dir = incompleteDir; } // cfg.settings;
+
+  # Directories transmission expects to exist and be ug+rwx.
+  directoriesToManage = [ homeDir settingsDir fullSettings.download-dir fullSettings.incomplete-dir ];
+
+  preStart = pkgs.writeScript "transmission-pre-start" ''
+    #!${pkgs.runtimeShell}
+    set -ex
+    for DIR in ${escapeShellArgs directoriesToManage}; do
+      mkdir -p "$DIR"
+      chmod 770 "$DIR"
+    done
+    cp -f ${settingsFile} ${settingsDir}/settings.json
+  '';
 in
 {
   options = {
@@ -59,8 +72,8 @@ in
           time the service starts). String values must be quoted, integer and
           boolean values must not.
 
-          See https://trac.transmissionbt.com/wiki/EditConfigFiles for
-          documentation.
+          See https://github.com/transmission/transmission/wiki/Editing-Configuration-Files
+          for documentation.
         '';
       };
 
@@ -89,9 +102,7 @@ in
 
       # 1) Only the "transmission" user and group have access to torrents.
       # 2) Optionally update/force specific fields into the configuration file.
-      serviceConfig.ExecStartPre = ''
-          ${pkgs.stdenv.shell} -c "mkdir -p ${homeDir} ${settingsDir} ${fullSettings.download-dir} ${fullSettings.incomplete-dir} && chmod 770 ${homeDir} ${settingsDir} ${fullSettings.download-dir} ${fullSettings.incomplete-dir} && rm -f ${settingsDir}/settings.json && cp -f ${settingsFile} ${settingsDir}/settings.json"
-      '';
+      serviceConfig.ExecStartPre = preStart;
       serviceConfig.ExecStart = "${pkgs.transmission}/bin/transmission-daemon -f --port ${toString config.services.transmission.port}";
       serviceConfig.ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
       serviceConfig.User = "transmission";
@@ -136,6 +147,7 @@ in
           ${getLib pkgs.libcap}/lib/libcap*.so*            mr,
           ${getLib pkgs.attr}/lib/libattr*.so*             mr,
           ${getLib pkgs.lz4}/lib/liblz4*.so*               mr,
+          ${getLib pkgs.libkrb5}/lib/lib*.so*              mr,
 
           @{PROC}/sys/kernel/random/uuid   r,
           @{PROC}/sys/vm/overcommit_memory r,
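A sketch of a configuration whose directories are created and permission-fixed by the new pre-start script (paths are placeholders; the attribute names match the download-dir/incomplete-dir keys used above):

  {
    services.transmission = {
      enable = true;
      settings = {
        download-dir   = "/srv/torrents";
        incomplete-dir = "/srv/torrents/.incomplete";
      };
    };
  }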
diff --git a/nixos/modules/services/ttys/agetty.nix b/nixos/modules/services/ttys/agetty.nix
index 3429397d2cc2..b50de496e975 100644
--- a/nixos/modules/services/ttys/agetty.nix
+++ b/nixos/modules/services/ttys/agetty.nix
@@ -64,8 +64,8 @@ in
 
   config = {
     # Note: this is set here rather than up there so that changing
-    # nixosLabel would not rebuild manual pages
-    services.mingetty.greetingLine = mkDefault ''<<< Welcome to NixOS ${config.system.nixosLabel} (\m) - \l >>>'';
+    # nixos.label would not rebuild manual pages
+    services.mingetty.greetingLine = mkDefault ''<<< Welcome to NixOS ${config.system.nixos.label} (\m) - \l >>>'';
 
     systemd.services."getty@" =
       { serviceConfig.ExecStart = [
diff --git a/nixos/modules/services/web-apps/atlassian/jira.nix b/nixos/modules/services/web-apps/atlassian/jira.nix
index 81ee8154326c..13c5951524d9 100644
--- a/nixos/modules/services/web-apps/atlassian/jira.nix
+++ b/nixos/modules/services/web-apps/atlassian/jira.nix
@@ -155,7 +155,7 @@ in
       requires = [ "postgresql.service" ];
       after = [ "postgresql.service" ];
 
-      path = [ cfg.jrePackage ];
+      path = [ cfg.jrePackage pkgs.bash ];
 
       environment = {
         JIRA_USER = cfg.user;
diff --git a/nixos/modules/services/web-apps/piwik-doc.xml b/nixos/modules/services/web-apps/matomo-doc.xml
index a393a182d36a..456aae6cc366 100644
--- a/nixos/modules/services/web-apps/piwik-doc.xml
+++ b/nixos/modules/services/web-apps/matomo-doc.xml
@@ -2,16 +2,16 @@
          xmlns:xlink="http://www.w3.org/1999/xlink"
          xmlns:xi="http://www.w3.org/2001/XInclude"
          version="5.0"
-         xml:id="module-services-piwik">
+         xml:id="module-services-matomo">
 
-  <title>Piwik</title>
+  <title>Matomo</title>
   <para>
-    Piwik is a real-time web analytics application.
-    This module configures php-fpm as backend for piwik, optionally configuring an nginx vhost as well.
+    Matomo is a real-time web analytics application.
+    This module configures php-fpm as backend for Matomo, optionally configuring an nginx vhost as well.
   </para>
 
   <para>
-    An automatic setup is not suported by piwik, so you need to configure piwik itself in the browser-based piwik setup.
+    An automatic setup is not supported by Matomo, so you need to configure Matomo itself in the browser-based Matomo setup.
   </para>
 
 
@@ -19,7 +19,7 @@
     <title>Database Setup</title>
 
     <para>
-      You also need to configure a MariaDB or MySQL database and -user for piwik yourself,
+      You also need to configure a MariaDB or MySQL database and -user for Matomo yourself,
       and enter those credentials in your browser.
       You can use passwordless database authentication via the UNIX_SOCKET authentication plugin
       with the following SQL commands:
@@ -27,20 +27,20 @@
       <programlisting>
         # For MariaDB
         INSTALL PLUGIN unix_socket SONAME 'auth_socket';
-        CREATE DATABASE piwik;
-        CREATE USER 'piwik'@'localhost' IDENTIFIED WITH unix_socket;
-        GRANT ALL PRIVILEGES ON piwik.* TO 'piwik'@'localhost';
+        CREATE DATABASE matomo;
+        CREATE USER 'matomo'@'localhost' IDENTIFIED WITH unix_socket;
+        GRANT ALL PRIVILEGES ON matomo.* TO 'matomo'@'localhost';
 
         # For MySQL
         INSTALL PLUGIN auth_socket SONAME 'auth_socket.so';
-        CREATE DATABASE piwik;
-        CREATE USER 'piwik'@'localhost' IDENTIFIED WITH auth_socket;
-        GRANT ALL PRIVILEGES ON piwik.* TO 'piwik'@'localhost';
+        CREATE DATABASE matomo;
+        CREATE USER 'matomo'@'localhost' IDENTIFIED WITH auth_socket;
+        GRANT ALL PRIVILEGES ON matomo.* TO 'matomo'@'localhost';
       </programlisting>
 
-      Then fill in <literal>piwik</literal> as database user and database name, and leave the password field blank.
-      This authentication works by allowing only the <literal>piwik</literal> unix user to authenticate as the 
-      <literal>piwik</literal> database user (without needing a password), but no other users.
+      Then fill in <literal>matomo</literal> as database user and database name, and leave the password field blank.
+      This authentication works by allowing only the <literal>matomo</literal> unix user to authenticate as the
+      <literal>matomo</literal> database user (without needing a password), but no other users.
       For more information on passwordless login, see
       <link xlink:href="https://mariadb.com/kb/en/mariadb/unix_socket-authentication-plugin/" />.
     </para>
@@ -55,9 +55,9 @@
     <title>Backup</title>
     <para>
       You only need to take backups of your MySQL database and the
-      <filename>/var/lib/piwik/config/config.ini.php</filename> file.
-      Use a user in the <literal>piwik</literal> group or root to access the file.
-      For more information, see <link xlink:href="https://piwik.org/faq/how-to-install/faq_138/" />.
+      <filename>/var/lib/matomo/config/config.ini.php</filename> file.
+      Use a user in the <literal>matomo</literal> group or root to access the file.
+      For more information, see <link xlink:href="https://matomo.org/faq/how-to-install/faq_138/" />.
     </para>
   </section>
 
@@ -67,14 +67,14 @@
     <itemizedlist>
       <listitem>
         <para>
-          Piwik's file integrity check will warn you.
+          Matomo's file integrity check will warn you.
           This is due to the patches necessary for NixOS, you can safely ignore this.
         </para>
       </listitem>
 
       <listitem>
         <para>
-          Piwik will warn you that the JavaScript tracker is not writable.
+          Matomo will warn you that the JavaScript tracker is not writable.
           This is because it's located in the read-only nix store.
           You can safely ignore this, unless you need a plugin that needs JavaScript tracker access.
         </para>
@@ -88,7 +88,7 @@
 
     <para>
       You can use other web servers by forwarding calls for <filename>index.php</filename> and
-      <filename>piwik.php</filename> to the <literal>/run/phpfpm-piwik.sock</literal> fastcgi unix socket.
+      <filename>piwik.php</filename> to the <literal>/run/phpfpm-matomo.sock</literal> fastcgi unix socket.
       You can use the nginx configuration in the module code as a reference to what else should be configured.
     </para>
   </section>
diff --git a/nixos/modules/services/web-apps/piwik.nix b/nixos/modules/services/web-apps/matomo.nix
index ce86c6873dd4..ef6ac9698e21 100644
--- a/nixos/modules/services/web-apps/piwik.nix
+++ b/nixos/modules/services/web-apps/matomo.nix
@@ -1,10 +1,11 @@
 { config, lib, pkgs, services, ... }:
 with lib;
 let
-  cfg = config.services.piwik;
+  cfg = config.services.matomo;
 
-  user = "piwik";
+  user = "matomo";
   dataDir = "/var/lib/${user}";
+  deprecatedDataDir = "/var/lib/piwik";
 
   pool = user;
   # it's not possible to use /run/phpfpm/${pool}.sock because /run/phpfpm/ is root:root 0770,
@@ -13,17 +14,22 @@ let
   phpExecutionUnit = "phpfpm-${pool}";
   databaseService = "mysql.service";
 
+  fqdn =
+    let
+      join = hostName: domain: hostName + optionalString (domain != null) ".${domain}";
+     in join config.networking.hostName config.networking.domain;
+
 in {
   options = {
-    services.piwik = {
+    services.matomo = {
       # NixOS PR for database setup: https://github.com/NixOS/nixpkgs/pull/6963
-      # piwik issue for automatic piwik setup: https://github.com/piwik/piwik/issues/10257
-      # TODO: find a nice way to do this when more NixOS MySQL and / or piwik automatic setup stuff is implemented.
+      # matomo issue for automatic matomo setup: https://github.com/matomo-org/matomo/issues/10257
+      # TODO: find a nice way to do this when more NixOS MySQL and / or matomo automatic setup stuff is implemented.
       enable = mkOption {
         type = types.bool;
         default = false;
         description = ''
-          Enable piwik web analytics with php-fpm backend.
+          Enable Matomo web analytics with a php-fpm backend.
           Either the nginx option or the webServerUser option is mandatory.
         '';
       };
@@ -32,8 +38,9 @@ in {
         type = types.nullOr types.str;
         default = null;
         example = "lighttpd";
+        # TODO: piwik.php might get renamed to matomo.php in future releases
         description = ''
-          Name of the web server user that forwards requests to the ${phpSocket} fastcgi socket for piwik if the nginx
+          Name of the web server user that forwards requests to the ${phpSocket} fastcgi socket for matomo if the nginx
           option is not used. Either this option or the nginx option is mandatory.
           If you want to use another webserver than nginx, you need to set this to that server's user
           and pass fastcgi requests to `index.php` and `piwik.php` to this socket.
@@ -55,7 +62,7 @@ in {
           catch_workers_output = yes
         '';
         description = ''
-          Settings for phpfpm's process manager. You might need to change this depending on the load for piwik.
+          Settings for phpfpm's process manager. You might need to change this depending on the load for matomo.
         '';
       };
 
@@ -65,7 +72,7 @@ in {
             (import ../web-servers/nginx/vhost-options.nix { inherit config lib; })
             {
               # enable encryption by default,
-              # as sensitive login and piwik data should not be transmitted in clear text.
+              # as sensitive login and matomo data should not be transmitted in clear text.
               options.forceSSL.default = true;
               options.enableACME.default = true;
             }
@@ -73,15 +80,19 @@ in {
         );
         default = null;
         example = {
-          serverName = "stats.$\{config.networking.hostName\}";
+          serverAliases = [
+            "matomo.$\{config.networking.domain\}"
+            "stats.$\{config.networking.domain\}"
+          ];
           enableACME = false;
         };
         description = ''
-            With this option, you can customize an nginx virtualHost which already has sensible defaults for piwik.
+            With this option, you can customize an nginx virtualHost which already has sensible defaults for matomo.
             Either this option or the webServerUser option is mandatory.
             Set this to {} to just enable the virtualHost if you don't need any customization.
-            If enabled, then by default, the serverName is piwik.$\{config.networking.hostName\}, SSL is active,
-            and certificates are acquired via ACME.
+            If enabled, then by default, the <option>serverName</option> is
+            <literal>${user}.$\{config.networking.hostName\}.$\{config.networking.domain\}</literal>,
+            SSL is active, and certificates are acquired via ACME.
             If this is set to null (the default), no nginx virtualHost will be configured.
         '';
       };
@@ -90,12 +101,12 @@ in {
 
   config = mkIf cfg.enable {
     warnings = mkIf (cfg.nginx != null && cfg.webServerUser != null) [
-      "If services.piwik.nginx is set, services.piwik.nginx.webServerUser is ignored and should be removed."
+      "If services.matomo.nginx is set, services.matomo.nginx.webServerUser is ignored and should be removed."
     ];
 
     assertions = [ {
         assertion = cfg.nginx != null || cfg.webServerUser != null;
-        message = "Either services.piwik.nginx or services.piwik.nginx.webServerUser is mandatory";
+        message = "Either services.matomo.nginx or services.matomo.nginx.webServerUser is mandatory";
     }];
 
     users.extraUsers.${user} = {
@@ -106,19 +117,20 @@ in {
     };
     users.extraGroups.${user} = {};
 
-    systemd.services.piwik_setup_update = {
-      # everything needs to set up and up to date before piwik php files are executed
+    systemd.services.matomo_setup_update = {
+      # everything needs to be set up and up to date before matomo php files are executed
       requiredBy = [ "${phpExecutionUnit}.service" ];
       before = [ "${phpExecutionUnit}.service" ];
       # the update part of the script can only work if the database is already up and running
       requires = [ databaseService ];
       after = [ databaseService ];
-      path = [ pkgs.piwik ];
+      path = [ pkgs.matomo ];
       serviceConfig = {
         Type = "oneshot";
         User = user;
         # hide especially config.ini.php from other
         UMask = "0007";
+        # TODO: might get renamed to MATOMO_USER_PATH in future versions
         Environment = "PIWIK_USER_PATH=${dataDir}";
         # chown + chmod in preStart needs root
         PermissionsStartOnly = true;
@@ -127,27 +139,32 @@ in {
       # e.g. after restoring from backup or moving from another system.
       # Note that ${dataDir}/config/config.ini.php might contain the MySQL password.
       preStart = ''
+        # migrate data from piwik to matomo folder
+        if [ -d ${deprecatedDataDir} ]; then
+          echo "Migrating from ${deprecatedDataDir} to ${dataDir}"
+          mv -T ${deprecatedDataDir} ${dataDir}
+        fi
         chown -R ${user}:${user} ${dataDir}
         chmod -R ug+rwX,o-rwx ${dataDir}
         '';
       script = ''
-            # Use User-Private Group scheme to protect piwik data, but allow administration / backup via piwik group
+            # Use User-Private Group scheme to protect matomo data, but allow administration / backup via matomo group
             # Copy config folder
             chmod g+s "${dataDir}"
-            cp -r "${pkgs.piwik}/config" "${dataDir}/"
+            cp -r "${pkgs.matomo}/config" "${dataDir}/"
             chmod -R u+rwX,g+rwX,o-rwx "${dataDir}"
 
             # check whether user setup has already been done
             if test -f "${dataDir}/config/config.ini.php"; then
               # then execute possibly pending database upgrade
-              piwik-console core:update --yes
+              matomo-console core:update --yes
             fi
       '';
     };
 
     systemd.services.${phpExecutionUnit} = {
-      # stop phpfpm on package upgrade, do database upgrade via piwik_setup_update, and then restart
-      restartTriggers = [ pkgs.piwik ];
+      # stop phpfpm on package upgrade, do database upgrade via matomo_setup_update, and then restart
+      restartTriggers = [ pkgs.matomo ];
       # stop config.ini.php from getting written with read permission for others
       serviceConfig.UMask = "0007";
     };
@@ -175,14 +192,14 @@ in {
       # References:
       # https://fralef.me/piwik-hardening-with-nginx-and-php-fpm.html
       # https://github.com/perusio/piwik-nginx
-      "${user}.${config.networking.hostName}" = mkMerge [ cfg.nginx {
-        # don't allow to override the root easily, as it will almost certainly break piwik.
+      "${user}.${fqdn}" = mkMerge [ cfg.nginx {
+        # don't allow to override the root easily, as it will almost certainly break matomo.
         # disadvantage: not shown as default in docs.
-        root = mkForce "${pkgs.piwik}/share";
+        root = mkForce "${pkgs.matomo}/share";
 
         # define locations here instead of as the submodule option's default
         # so that they can easily be extended with additional locations if required
-        # without needing to redefine the piwik ones.
+        # without needing to redefine the matomo ones.
         # disadvantage: not shown as default in docs.
         locations."/" = {
           index = "index.php";
@@ -191,6 +208,7 @@ in {
         locations."= /index.php".extraConfig = ''
           fastcgi_pass unix:${phpSocket};
         '';
+        # TODO: might get renamed to matomo.php in future versions
         # allow piwik.php for tracking
         locations."= /piwik.php".extraConfig = ''
           fastcgi_pass unix:${phpSocket};
@@ -212,6 +230,7 @@ in {
         locations."= /robots.txt".extraConfig = ''
           return 200 "User-agent: *\nDisallow: /\n";
         '';
+        # TODO: might get renamed to matomo.js in future versions
         # let browsers cache piwik.js
         locations."= /piwik.js".extraConfig = ''
           expires 1M;
@@ -221,7 +240,7 @@ in {
   };
 
   meta = {
-    doc = ./piwik-doc.xml;
+    doc = ./matomo-doc.xml;
     maintainers = with stdenv.lib.maintainers; [ florianjacob ];
   };
 }
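A minimal sketch of the renamed module with the nginx virtual host enabled using its defaults, as described in the option text above:

  {
    services.matomo = {
      enable = true;
      # {} enables the virtualHost with its defaults:
      # serverName matomo.<hostName>.<domain>, forceSSL and ACME enabled
      nginx = {};
    };
  }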
diff --git a/nixos/modules/services/web-apps/nexus.nix b/nixos/modules/services/web-apps/nexus.nix
index a750aa66b27c..f6a5ce73a12b 100644
--- a/nixos/modules/services/web-apps/nexus.nix
+++ b/nixos/modules/services/web-apps/nexus.nix
@@ -11,7 +11,7 @@ in
 {
   options = {
     services.nexus = {
-      enable = mkEnableOption "SonarType Nexus3 OSS service";
+      enable = mkEnableOption "Sonatype Nexus3 OSS service";
 
       user = mkOption {
         type = types.str;
@@ -54,7 +54,7 @@ in
     users.extraGroups."${cfg.group}" = {};
 
     systemd.services.nexus = {
-      description = "SonarType Nexus3";
+      description = "Sonatype Nexus3";
 
       wantedBy = [ "multi-user.target" ];
 
diff --git a/nixos/modules/services/web-apps/nixbot.nix b/nixos/modules/services/web-apps/nixbot.nix
deleted file mode 100644
index 0592d01bf369..000000000000
--- a/nixos/modules/services/web-apps/nixbot.nix
+++ /dev/null
@@ -1,149 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-  cfg = config.services.nixbot;
-  pyramidIni = ''
-    ###
-    # app configuration
-    # http://docs.pylonsproject.org/projects/pyramid/en/1.7-branch/narr/environment.html
-    ###
-
-    [app:main]
-    use = egg:nixbot
-
-    nixbot.github_token = ${cfg.githubToken}
-    nixbot.bot_name = ${cfg.botName}
-    nixbot.repo = ${cfg.repo}
-    nixbot.pr_repo = ${cfg.prRepo}
-    nixbot.hydra_jobsets_repo = ${cfg.hydraJobsetsRepo}
-    nixbot.github_secret = justnotsorandom
-    nixbot.public_url = ${cfg.publicUrl}
-    nixbot.repo_dir = ${cfg.repoDir}
-
-    pyramid.reload_templates = false
-    pyramid.debug_authorization = false
-    pyramid.debug_notfound = false
-    pyramid.debug_routematch = false
-    pyramid.default_locale_name = en
-
-    # By default, the toolbar only appears for clients from IP addresses
-    # '127.0.0.1' and '::1'.
-    # debugtoolbar.hosts = 127.0.0.1 ::1
-
-    ###
-    # wsgi server configuration
-    ###
-
-    [server:main]
-    use = egg:waitress#main
-    host = 0.0.0.0
-    port = 6543
-
-    ###
-    # logging configuration
-    # http://docs.pylonsproject.org/projects/pyramid/en/1.7-branch/narr/logging.html
-    ###
-
-    [loggers]
-    keys = root, nixbot
-
-    [handlers]
-    keys = console
-
-    [formatters]
-    keys = generic
-
-    [logger_root]
-    level = INFO
-    handlers = console
-
-    [logger_nixbot]
-    level = INFO
-    handlers =
-    qualname = nixbot
-
-    [handler_console]
-    class = StreamHandler
-    args = (sys.stderr,)
-    level = NOTSET
-    formatter = generic
-
-    [formatter_generic]
-    format = %(asctime)s %(levelname)-5.5s [%(name)s:%(lineno)s][%(threadName)s] %(message)s
-  '';
-in {
-  options = {
-    services.nixbot = {
-      enable = mkEnableOption "nixbot";
-
-      botName = mkOption {
-        type = types.str;
-        description = "The bot's github user account name.";
-        default = "nixbot";
-      };
-
-      githubToken = mkOption {
-        type = types.str;
-        description = "The bot's github user account token.";
-        example = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
-      };
-
-      repo = mkOption {
-        type = types.str;
-        description = "The github repository to check for PRs.";
-        example = "nixos/nixpkgs";
-      };
-
-      prRepo = mkOption {
-        type = types.str;
-        description = "The github repository to push the testing branches to.";
-        example = "nixos/nixpkgs-pr";
-      };
-
-      hydraJobsetsRepo = mkOption {
-        type = types.str;
-        description = "The github repository to push the hydra jobset definitions to.";
-        example = "nixos/hydra-jobsets";
-      };
-
-      publicUrl = mkOption {
-        type = types.str;
-        description = "The public URL the bot is reachable at (Github hook endpoint).";
-        example = "https://nixbot.nixos.org";
-      };
-
-      repoDir = mkOption {
-        type = types.path;
-        description = "The directory the repositories are stored in.";
-        default = "/var/lib/nixbot";
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    users.extraUsers.nixbot = {
-      createHome = true;
-      home = cfg.repoDir;
-    };
-
-    systemd.services.nixbot = let
-      env = pkgs.python3.buildEnv.override {
-        extraLibs = [ pkgs.nixbot ];
-      };
-    in {
-      after = [ "network.target" ];
-      wantedBy = [ "multi-user.target" ];
-      script = ''
-        ${env}/bin/pserve ${pkgs.writeText "production.ini" pyramidIni}
-      '';
-
-      serviceConfig = {
-        User = "nixbot";
-        Group = "nogroup";
-        PermissionsStartOnly = true;
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/web-apps/pump.io-configure.js b/nixos/modules/services/web-apps/pump.io-configure.js
deleted file mode 100644
index 1fbf346a34c4..000000000000
--- a/nixos/modules/services/web-apps/pump.io-configure.js
+++ /dev/null
@@ -1,23 +0,0 @@
-var fs = require('fs');
-
-var opts = JSON.parse(fs.readFileSync("/dev/stdin").toString());
-var config = opts.config;
-
-var readSecret = function(filename) {
-  return fs.readFileSync(filename).toString().trim();
-};
-
-if (opts.secretFile) {
-  config.secret = readSecret(opts.secretFile);
-}
-if (opts.dbPasswordFile) {
-  config.params.dbpass = readSecret(opts.dbPasswordFile);
-}
-if (opts.smtpPasswordFile) {
-  config.smtppass = readSecret(opts.smtpPasswordFile);
-}
-if (opts.spamClientSecretFile) {
-  config.spamclientsecret = readSecret(opts.opts.spamClientSecretFile);
-}
-
-fs.writeFileSync(opts.outputFile, JSON.stringify(config));
diff --git a/nixos/modules/services/web-apps/pump.io.nix b/nixos/modules/services/web-apps/pump.io.nix
deleted file mode 100644
index 27ae68516367..000000000000
--- a/nixos/modules/services/web-apps/pump.io.nix
+++ /dev/null
@@ -1,438 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-  cfg = config.services.pumpio;
-  dataDir = "/var/lib/pump.io";
-  runDir = "/run/pump.io";
-  user = "pumpio";
-
-  optionalSet = condition: value: if condition then value else {};
-
-  configScript = ./pump.io-configure.js;
-  configOptions = {
-    outputFile = "${runDir}/config.json";
-    config =
-      (optionalSet (cfg.driver != "disk") {
-        driver = cfg.driver;
-      }) //
-      {
-        params = (optionalSet (cfg.driver == "disk") { dir = dataDir; }) //
-                 (optionalSet (cfg.driver == "mongodb" || cfg.driver == "redis") {
-                   host = cfg.dbHost;
-                   port = cfg.dbPort;
-                   dbname = cfg.dbName;
-                   dbuser = cfg.dbUser;
-                   dbpass = cfg.dbPassword;
-                 }) //
-                 (optionalSet (cfg.driver == "memcached") {
-                   host = cfg.dbHost;
-                   port = cfg.dbPort;
-                 }) // cfg.driverParams;
-        secret = cfg.secret;
-
-        address = cfg.address;
-        port = cfg.port;
-
-        noweb = false;
-        urlPort = cfg.urlPort;
-        hostname = cfg.hostname;
-        favicon = cfg.favicon;
-
-        site = cfg.site;
-        owner = cfg.owner;
-        ownerURL = cfg.ownerURL;
-
-        key = cfg.sslKey;
-        cert = cfg.sslCert;
-        bounce = false;
-
-        spamhost = cfg.spamHost;
-        spamclientid = cfg.spamClientId;
-        spamclientsecret = cfg.spamClientSecret;
-
-        requireEmail = cfg.requireEmail;
-        smtpserver = cfg.smtpHost;
-        smtpport = cfg.smtpPort;
-        smtpuser = cfg.smtpUser;
-        smtppass = cfg.smtpPassword;
-        smtpusessl = cfg.smtpUseSSL;
-        smtpfrom = cfg.smtpFrom;
-
-        nologger = false;
-        enableUploads = cfg.enableUploads;
-        datadir = dataDir;
-        debugClient = false;
-        firehose = cfg.firehose;
-        disableRegistration = cfg.disableRegistration;
-
-        inherit (cfg) secretFile dbPasswordFile smtpPasswordFile spamClientSecretFile;
-      } //
-      (optionalSet (cfg.port < 1024) {
-        serverUser = user;  # have pump.io listen then drop privileges
-      }) // cfg.extraConfig;
-}; in {
-  options = {
-
-    services.pumpio = {
-
-      enable = mkEnableOption "Pump.io social streams server";
-
-      secret = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        example = "my dog has fleas";
-        description = ''
-          A session-generating secret, server-wide password.  Warning:
-          this is stored in cleartext in the Nix store!
-        '';
-      };
-
-      secretFile = mkOption {
-        type = types.nullOr types.path;
-        default = null;
-        example = "/run/keys/pump.io-secret";
-        description = ''
-          A file containing the session-generating secret,
-          server-wide password.
-        '';
-      };
-
-      site = mkOption {
-        type = types.str;
-        example = "Awesome Sauce";
-        description = "Name of the server";
-      };
-
-      owner = mkOption {
-        type = types.str;
-        default = "";
-        example = "Awesome Inc.";
-        description = "Name of owning entity, if you want to link to it.";
-      };
-
-      ownerURL = mkOption {
-        type = types.str;
-        default = "";
-        example = "https://pump.io";
-        description = "URL of owning entity, if you want to link to it.";
-      };
-
-      address = mkOption {
-        type = types.str;
-        default = "localhost";
-        description = ''
-          Web server listen address.
-        '';
-      };
-
-      port = mkOption {
-        type = types.int;
-        default = 31337;
-        description = ''
-          Port to listen on. Defaults to 31337, which is suitable for
-          running behind a reverse proxy. For a standalone server,
-          use 443.
-        '';
-      };
-
-      hostname = mkOption {
-        type = types.nullOr types.str;
-        default = "localhost";
-        description = ''
-          The hostname of the server, used for generating
-          URLs. Defaults to "localhost" which doesn't do much for you.
-        '';
-      };
-
-      urlPort = mkOption {
-        type = types.int;
-        default = 443;
-        description = ''
-          Port to use for generating URLs. This basically has to be
-          either 80 or 443 because the host-meta and Webfinger
-          protocols don't make any provision for HTTP/HTTPS servers
-          running on other ports.
-        '';
-      };
-
-      favicon = mkOption {
-        type = types.nullOr types.path;
-        default = null;
-        description = ''
-          Local filesystem path to the favicon.ico file to use. This
-          will be served as "/favicon.ico" by the server.
-        '';
-      };
-
-      enableUploads = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          If you want to disable file uploads, set this to false. Uploaded files will be stored
-          in ${dataDir}/uploads.
-        '';
-      };
-
-      sslKey = mkOption {
-        type = types.path;
-        example = "${dataDir}/myserver.key";
-        default = "";
-        description = ''
-          The path to the server certificate private key. The
-          certificate is required, but it can be self-signed.
-        '';
-      };
-
-      sslCert = mkOption {
-        type = types.path;
-        example = "${dataDir}/myserver.crt";
-        default = "";
-        description = ''
-          The path to the server certificate. The certificate is
-          required, but it can be self-signed.
-        '';
-      };
-
-      firehose = mkOption {
-        type = types.str;
-        default = "ofirehose.com";
-        description = ''
-          Firehose host running the ofirehose software. Defaults to
-          "ofirehose.com". Public notices will be ping this firehose
-          server and from there go out to search engines and the
-          world. If you want to disconnect from the public web, set
-          this to something falsy.
-        '';
-      };
-
-      disableRegistration = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Disables registering new users on the site through the Web
-          or the API.
-        '';
-      };
-
-      requireEmail = mkOption {
-        type = types.bool;
-        default = false;
-        description = "Require an e-mail address to register.";
-      };
-
-      extraConfig = mkOption {
-        default = { };
-        description = ''
-          Extra configuration options which are serialized to json and added
-          to the pump.io.json config file.
-        '';
-      };
-
-      driver = mkOption {
-        type = types.enum [ "mongodb" "disk" "lrucache" "memcached" "redis" ];
-        default = "mongodb";
-        description = "Type of database. Corresponds to a nodejs databank driver.";
-      };
-
-      driverParams = mkOption {
-        default = { };
-        description = "Extra parameters for the driver.";
-      };
-
-      dbHost = mkOption {
-        type = types.str;
-        default = "localhost";
-        description = "The database host to connect to.";
-      };
-
-      dbPort = mkOption {
-        type = types.int;
-        default = 27017;
-        description = "The port that the database is listening on.";
-      };
-
-      dbName = mkOption {
-        type = types.str;
-        default = "pumpio";
-        description = "The name of the database to use.";
-      };
-
-      dbUser = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        description = ''
-          The username. Defaults to null, meaning no authentication.
-        '';
-      };
-
-      dbPassword = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        description = ''
-          The password corresponding to dbUser.  Warning: this is
-          stored in cleartext in the Nix store!
-        '';
-      };
-
-      dbPasswordFile = mkOption {
-        type = types.nullOr types.path;
-        default = null;
-        example = "/run/keys/pump.io-dbpassword";
-        description = ''
-          A file containing the password corresponding to dbUser.
-        '';
-      };
-
-      smtpHost = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        example = "localhost";
-        description = ''
-          Server to use for sending transactional email. If it's not
-          set up, no email is sent and features like password recovery
-          and email notification won't work.
-        '';
-      };
-
-      smtpPort = mkOption {
-        type = types.int;
-        default = 25;
-        description = ''
-          Port to connect to on SMTP server.
-        '';
-      };
-
-      smtpUser = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        description = ''
-          Username to use to connect to SMTP server. Might not be
-          necessary for some servers.
-        '';
-      };
-
-      smtpPassword = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        description = ''
-          Password to use to connect to SMTP server. Might not be
-          necessary for some servers.  Warning: this is stored in
-          cleartext in the Nix store!
-        '';
-      };
-
-      smtpPasswordFile = mkOption {
-        type = types.nullOr types.path;
-        default = null;
-        example = "/run/keys/pump.io-smtppassword";
-        description = ''
-          A file containing the password used to connect to SMTP
-          server. Might not be necessary for some servers.
-        '';
-      };
-
-
-      smtpUseSSL = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Only use SSL with the SMTP server. By default, a SSL
-          connection is negotiated using TLS. You may need to change
-          the smtpPort value if you set this.
-        '';
-      };
-
-      smtpFrom = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        description = ''
-          Email address to use in the "From:" header of outgoing
-          notifications. Defaults to 'no-reply@' plus the site
-          hostname.
-        '';
-      };
-
-      spamHost = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        description = ''
-          Host running activityspam software to use to test updates
-          for spam.
-        '';
-      };
-      spamClientId = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        description = "OAuth pair for spam server.";
-      };
-      spamClientSecret = mkOption {
-        type = types.nullOr types.str;
-        default = null;
-        description = ''
-          OAuth pair for spam server.  Warning: this is
-          stored in cleartext in the Nix store!
-        '';
-      };
-      spamClientSecretFile = mkOption {
-        type = types.nullOr types.path;
-        default = null;
-        example = "/run/keys/pump.io-spamclientsecret";
-        description = ''
-          A file containing the OAuth key for the spam server.
-        '';
-      };
-    };
-
-  };
-
-  config = mkIf cfg.enable {
-    warnings = let warn = k: optional (cfg.${k} != null)
-                 "config.services.pumpio.${k} is insecure. Use ${k}File instead.";
-               in concatMap warn [ "secret" "dbPassword" "smtpPassword" "spamClientSecret" ];
-
-    assertions = [
-      { assertion = !(isNull cfg.secret && isNull cfg.secretFile);
-        message = "pump.io needs a secretFile configured";
-      }
-    ];
-
-    systemd.services."pump.io" =
-      { description = "Pump.io - stream server that does most of what people really want from a social network";
-        after = [ "network.target" ];
-        wantedBy = [ "multi-user.target" ];
-
-        preStart = ''
-          mkdir -p ${dataDir}/uploads
-          mkdir -p ${runDir}
-          chown pumpio:pumpio ${dataDir}/uploads ${runDir}
-          chmod 770 ${dataDir}/uploads ${runDir}
-
-          ${pkgs.nodejs}/bin/node ${configScript} <<EOF
-          ${builtins.toJSON configOptions}
-          EOF
-
-          chgrp pumpio ${configOptions.outputFile}
-          chmod 640 ${configOptions.outputFile}
-        '';
-
-        serviceConfig = {
-          ExecStart = "${pkgs.pumpio}/bin/pump -c ${configOptions.outputFile}";
-          PermissionsStartOnly = true;
-          User = if cfg.port < 1024 then "root" else user;
-          Group = user;
-        };
-        environment = { NODE_ENV = "production"; };
-      };
-
-      users.extraGroups.pumpio.gid = config.ids.gids.pumpio;
-      users.extraUsers.pumpio = {
-        group = "pumpio";
-        uid = config.ids.uids.pumpio;
-        description = "Pump.io user";
-        home = dataDir;
-        createHome = true;
-      };
-  };
-}
diff --git a/nixos/modules/services/web-apps/restya-board.nix b/nixos/modules/services/web-apps/restya-board.nix
new file mode 100644
index 000000000000..cee725e8fe5f
--- /dev/null
+++ b/nixos/modules/services/web-apps/restya-board.nix
@@ -0,0 +1,384 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+# TODO: are these php-packages needed?
+#imagick
+#php-geoip -> php.ini: extension = geoip.so
+#expat
+
+let
+  cfg = config.services.restya-board;
+
+  runDir = "/run/restya-board";
+
+  poolName = "restya-board";
+  phpfpmSocketName = "/var/run/phpfpm/${poolName}.sock";
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.restya-board = {
+
+      enable = mkEnableOption "restya-board";
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/restya-board";
+        example = "/var/lib/restya-board";
+        description = ''
+          Data of the application.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "restya-board";
+        example = "restya-board";
+        description = ''
+          User account under which the web-application runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "nginx";
+        example = "nginx";
+        description = ''
+          Group account under which the web-application runs.
+        '';
+      };
+
+      virtualHost = {
+        serverName = mkOption {
+          type = types.str;
+          default = "restya.board";
+          description = ''
+            Name of the nginx virtualhost to use.
+          '';
+        };
+
+        listenHost = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = ''
+            Listen address for the virtualhost to use.
+          '';
+        };
+
+        listenPort = mkOption {
+          type = types.int;
+          default = 3000;
+          description = ''
+            Listen port for the virtualhost to use.
+          '';
+        };
+      };
+
+      database = {
+        host = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = ''
+            Host of the database. Leave 'null' to use a local PostgreSQL database.
+            A local PostgreSQL database is initialized automatically.
+          '';
+        };
+
+        port = mkOption {
+          type = types.nullOr types.int;
+          default = 5432;
+          description = ''
+            The database's port.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "restya_board";
+          description = ''
+            Name of the database. The database must exist.
+          '';
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "restya_board";
+          description = ''
+            The database user. The user must exist and have access to
+            the specified database.
+          '';
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = ''
+            The database user's password. 'null' if no password is set.
+          '';
+        };
+      };
+
+      email = {
+        server = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          example = "localhost";
+          description = ''
+            Hostname to send outgoing mail. Null to use the system MTA.
+          '';
+        };
+
+        port = mkOption {
+          type = types.int;
+          default = 25;
+          description = ''
+            Port used to connect to SMTP server.
+          '';
+        };
+
+        login = mkOption {
+          type = types.str;
+          default = "";
+          description = ''
+            SMTP authentication login used when sending outgoing mail.
+          '';
+        };
+
+        password = mkOption {
+          type = types.str;
+          default = "";
+          description = ''
+            SMTP authentication password used when sending outgoing mail.
+
+            ATTENTION: The password is stored world-readable in the nix-store!
+          '';
+        };
+      };
+
+      timezone = mkOption {
+        type = types.lines;
+        default = "GMT";
+        description = ''
+          Timezone the web-app runs in.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.phpfpm.poolConfigs = {
+      "${poolName}" = ''
+        listen = "${phpfpmSocketName}";
+        listen.owner = nginx
+        listen.group = nginx
+        listen.mode = 0600
+        user = ${cfg.user}
+        group = ${cfg.group}
+        pm = dynamic
+        pm.max_children = 75
+        pm.start_servers = 10
+        pm.min_spare_servers = 5
+        pm.max_spare_servers = 20
+        pm.max_requests = 500
+        catch_workers_output = 1
+      '';
+    };
+
+    services.phpfpm.phpOptions = ''
+      date.timezone = "CET"
+
+      ${optionalString (!isNull cfg.email.server) ''
+        SMTP = ${cfg.email.server}
+        smtp_port = ${toString cfg.email.port}
+        auth_username = ${cfg.email.login}
+        auth_password = ${cfg.email.password}
+      ''}
+    '';
+
+    services.nginx.enable = true;
+    services.nginx.virtualHosts."${cfg.virtualHost.serverName}" = {
+      listen = [ { addr = cfg.virtualHost.listenHost; port = cfg.virtualHost.listenPort; } ];
+      serverName = cfg.virtualHost.serverName;
+      root = runDir;
+      extraConfig = ''
+        index index.html index.php;
+
+        gzip on;
+        gzip_disable "msie6";
+
+        gzip_comp_level 6;
+        gzip_min_length  1100;
+        gzip_buffers 16 8k;
+        gzip_proxied any;
+        gzip_types text/plain application/xml text/css text/js text/xml application/x-javascript text/javascript application/json application/xml+rss;
+
+        client_max_body_size 300M;
+
+        rewrite ^/oauth/authorize$ /server/php/authorize.php last;
+        rewrite ^/oauth_callback/([a-zA-Z0-9_\.]*)/([a-zA-Z0-9_\.]*)$ /server/php/oauth_callback.php?plugin=$1&code=$2 last;
+        rewrite ^/download/([0-9]*)/([a-zA-Z0-9_\.]*)$ /server/php/download.php?id=$1&hash=$2 last;
+        rewrite ^/ical/([0-9]*)/([0-9]*)/([a-z0-9]*).ics$ /server/php/ical.php?board_id=$1&user_id=$2&hash=$3 last;
+        rewrite ^/api/(.*)$ /server/php/R/r.php?_url=$1&$args last;
+        rewrite ^/api_explorer/api-docs/$ /client/api_explorer/api-docs/index.php last;
+      '';
+
+      locations."/".root = "${runDir}/client";
+
+      locations."~ \.php$" = {
+        tryFiles = "$uri =404";
+        extraConfig = ''
+          include ${pkgs.nginx}/conf/fastcgi_params;
+          fastcgi_pass    unix:${phpfpmSocketName};
+          fastcgi_index   index.php;
+          fastcgi_param   SCRIPT_FILENAME $document_root$fastcgi_script_name;
+          fastcgi_param   PHP_VALUE "upload_max_filesize=9G \n post_max_size=9G \n max_execution_time=200 \n max_input_time=200 \n memory_limit=256M";
+        '';
+      };
+
+      locations."~* \.(css|js|less|html|ttf|woff|jpg|jpeg|gif|png|bmp|ico)" = {
+        root = "${runDir}/client";
+        extraConfig = ''
+          if (-f $request_filename) {
+                  break;
+          }
+          rewrite ^/img/([a-zA-Z_]*)/([a-zA-Z_]*)/([a-zA-Z0-9_\.]*)$ /server/php/image.php?size=$1&model=$2&filename=$3 last;
+          add_header        Cache-Control public;
+          add_header        Cache-Control must-revalidate;
+          expires           7d;
+        '';
+      };
+    };
+
+    systemd.services.restya-board-init = {
+      description = "Restya board initialization";
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "postgresql.service" ];
+      after = [ "network.target" "postgresql.service" ];
+
+      script = ''
+        rm -rf "${runDir}"
+        mkdir -m 750 -p "${runDir}"
+        cp -r "${pkgs.restya-board}/"* "${runDir}"
+        sed -i "s/@restya.com/@${cfg.virtualHost.serverName}/g" "${runDir}/sql/restyaboard_with_empty_data.sql"
+        rm -rf "${runDir}/media"
+        rm -rf "${runDir}/client/img"
+        chmod -R 0750 "${runDir}"
+
+        sed -i "s@^php@${config.services.phpfpm.phpPackage}/bin/php@" "${runDir}/server/php/shell/"*.sh
+
+        ${if (isNull cfg.database.host) then ''
+          sed -i "s/^.*'R_DB_HOST'.*$/define('R_DB_HOST', 'localhost');/g" "${runDir}/server/php/config.inc.php"
+          sed -i "s/^.*'R_DB_PASSWORD'.*$/define('R_DB_PASSWORD', 'restya');/g" "${runDir}/server/php/config.inc.php"
+        '' else ''
+          sed -i "s/^.*'R_DB_HOST'.*$/define('R_DB_HOST', '${cfg.database.host}');/g" "${runDir}/server/php/config.inc.php"
+          sed -i "s/^.*'R_DB_PASSWORD'.*$/define('R_DB_PASSWORD', '$(<${cfg.database.dbPassFile})');/g" "${runDir}/server/php/config.inc.php"
+        ''}
+        sed -i "s/^.*'R_DB_PORT'.*$/define('R_DB_PORT', '${toString cfg.database.port}');/g" "${runDir}/server/php/config.inc.php"
+        sed -i "s/^.*'R_DB_NAME'.*$/define('R_DB_NAME', '${cfg.database.name}');/g" "${runDir}/server/php/config.inc.php"
+        sed -i "s/^.*'R_DB_USER'.*$/define('R_DB_USER', '${cfg.database.user}');/g" "${runDir}/server/php/config.inc.php"
+
+        chmod 0400 "${runDir}/server/php/config.inc.php"
+
+        ln -sf "${cfg.dataDir}/media" "${runDir}/media"
+        ln -sf "${cfg.dataDir}/client/img" "${runDir}/client/img"
+
+        chmod g+w "${runDir}/tmp/cache"
+        chown -R "${cfg.user}"."${cfg.group}" "${runDir}"
+
+
+        mkdir -m 0750 -p "${cfg.dataDir}"
+        mkdir -m 0750 -p "${cfg.dataDir}/media"
+        mkdir -m 0750 -p "${cfg.dataDir}/client/img"
+        cp -r "${pkgs.restya-board}/media/"* "${cfg.dataDir}/media"
+        cp -r "${pkgs.restya-board}/client/img/"* "${cfg.dataDir}/client/img"
+        chown "${cfg.user}"."${cfg.group}" "${cfg.dataDir}"
+        chown -R "${cfg.user}"."${cfg.group}" "${cfg.dataDir}/media"
+        chown -R "${cfg.user}"."${cfg.group}" "${cfg.dataDir}/client/img"
+
+        ${optionalString (isNull cfg.database.host) ''
+          if ! [ -e "${cfg.dataDir}/.db-initialized" ]; then
+            ${pkgs.sudo}/bin/sudo -u ${config.services.postgresql.superUser} \
+              ${config.services.postgresql.package}/bin/psql -U ${config.services.postgresql.superUser} \
+              -c "CREATE USER ${cfg.database.user} WITH ENCRYPTED PASSWORD 'restya'"
+
+            ${pkgs.sudo}/bin/sudo -u ${config.services.postgresql.superUser} \
+              ${config.services.postgresql.package}/bin/psql -U ${config.services.postgresql.superUser} \
+              -c "CREATE DATABASE ${cfg.database.name} OWNER ${cfg.database.user} ENCODING 'UTF8' TEMPLATE template0"
+
+            ${pkgs.sudo}/bin/sudo -u ${cfg.user} \
+              ${config.services.postgresql.package}/bin/psql -U ${cfg.database.user} \
+              -d ${cfg.database.name} -f "${runDir}/sql/restyaboard_with_empty_data.sql"
+
+            touch "${cfg.dataDir}/.db-initialized"
+          fi
+        ''}
+      '';
+    };
+
+    systemd.timers.restya-board = {
+      description = "restya-board scripts for e.g. email notification";
+      wantedBy = [ "timers.target" ];
+      after = [ "restya-board-init.service" ];
+      requires = [ "restya-board-init.service" ];
+      timerConfig = {
+        OnUnitInactiveSec = "60s";
+        Unit = "restya-board-timers.service";
+      };
+    };
+
+    systemd.services.restya-board-timers = {
+      description = "restya-board scripts for e.g. email notification";
+      serviceConfig.Type = "oneshot";
+      serviceConfig.User = cfg.user;
+
+      after = [ "restya-board-init.service" ];
+      requires = [ "restya-board-init.service" ];
+
+      script = ''
+        /bin/sh ${runDir}/server/php/shell/instant_email_notification.sh 2> /dev/null || true
+        /bin/sh ${runDir}/server/php/shell/periodic_email_notification.sh 2> /dev/null || true
+        /bin/sh ${runDir}/server/php/shell/imap.sh 2> /dev/null || true
+        /bin/sh ${runDir}/server/php/shell/webhook.sh 2> /dev/null || true
+        /bin/sh ${runDir}/server/php/shell/card_due_notification.sh 2> /dev/null || true
+      '';
+    };
+
+    users.extraUsers.restya-board = {
+      isSystemUser = true;
+      createHome = false;
+      home = runDir;
+      group  = "restya-board";
+    };
+    users.extraGroups.restya-board = {};
+
+    services.postgresql.enable = mkIf (isNull cfg.database.host) true;
+
+    services.postgresql.identMap = optionalString (isNull cfg.database.host)
+      ''
+        restya-board-users restya-board restya_board
+      '';
+
+    services.postgresql.authentication = optionalString (isNull cfg.database.host)
+      ''
+        local restya_board all ident map=restya-board-users
+      '';
+
+  };
+
+}
+
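As a usage note, the new module can be exercised with only the options declared above; the sketch below leaves database.host at its null default so the restya-board-init unit provisions the local PostgreSQL role and database itself. The server name is a placeholder.

{ config, pkgs, ... }:
{
  services.restya-board = {
    enable = true;
    virtualHost = {
      serverName = "kanban.example.org";  # placeholder; also substituted into the seed SQL above
      listenHost = "0.0.0.0";
      listenPort = 80;
    };
    timezone = "UTC";
    # database.host stays null, so the init script creates the
    # restya_board role and database via the local postgresql.service
  };
}
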
diff --git a/nixos/modules/services/web-apps/tt-rss.nix b/nixos/modules/services/web-apps/tt-rss.nix
index 76b0ee6da968..610c6463a5eb 100644
--- a/nixos/modules/services/web-apps/tt-rss.nix
+++ b/nixos/modules/services/web-apps/tt-rss.nix
@@ -99,8 +99,8 @@ let
 
       user = mkOption {
         type = types.str;
-        default = "nginx";
-        example = "nginx";
+        default = "tt_rss";
+        example = "tt_rss";
         description = ''
           User account under which both the update daemon and the web-application run.
         '';
@@ -466,26 +466,28 @@ let
       '';
     };
 
-    services.nginx.virtualHosts = mkIf (cfg.virtualHost != null) {
-      "${cfg.virtualHost}" = {
-        root = "${cfg.root}";
-
-        locations."/" = {
-          index = "index.php";
-        };
-
-        locations."~ \.php$" = {
-          extraConfig = ''
-            fastcgi_split_path_info ^(.+\.php)(/.+)$;
-            fastcgi_pass unix:${phpfpmSocketName};
-            fastcgi_index index.php;
-            fastcgi_param SCRIPT_FILENAME ${cfg.root}/$fastcgi_script_name;
-          '';
+    # NOTE: No configuration is done if not using a virtual host
+    services.nginx = mkIf (cfg.virtualHost != null) {
+      enable = true;
+      virtualHosts = {
+        "${cfg.virtualHost}" = {
+          root = "${cfg.root}";
+
+          locations."/" = {
+            index = "index.php";
+          };
+
+          locations."~ \.php$" = {
+            extraConfig = ''
+              fastcgi_split_path_info ^(.+\.php)(/.+)$;
+              fastcgi_pass unix:${phpfpmSocketName};
+              fastcgi_index index.php;
+            '';
+          };
         };
       };
     };
 
-
     systemd.services.tt-rss = let
       dbService = if cfg.database.type == "pgsql" then "postgresql.service" else "mysql.service";
     in {
@@ -496,14 +498,14 @@ let
           callSql = e:
               if cfg.database.type == "pgsql" then ''
                   ${optionalString (cfg.database.password != null) "PGPASSWORD=${cfg.database.password}"} \
-                  ${pkgs.postgresql95}/bin/psql \
+                  ${pkgs.sudo}/bin/sudo -u ${cfg.user} ${config.services.postgresql.package}/bin/psql \
                     -U ${cfg.database.user} \
                     ${optionalString (cfg.database.host != null) "-h ${cfg.database.host} --port ${toString dbPort}"} \
                     -c '${e}' \
                     ${cfg.database.name}''
 
               else if cfg.database.type == "mysql" then ''
-                  echo '${e}' | ${pkgs.mysql}/bin/mysql \
+                  echo '${e}' | ${pkgs.sudo}/bin/sudo -u ${cfg.user} ${config.services.mysql.package}/bin/mysql \
                     -u ${cfg.database.user} \
                     ${optionalString (cfg.database.password != null) "-p${cfg.database.password}"} \
                     ${optionalString (cfg.database.host != null) "-h ${cfg.database.host} -P ${toString dbPort}"} \
@@ -521,6 +523,14 @@ let
         ''
 
         + (optionalString (cfg.database.type == "pgsql") ''
+          ${optionalString (cfg.database.host == null && cfg.database.password == null) ''
+            if ! [ -e ${cfg.root}/.db-created ]; then
+              ${pkgs.sudo}/bin/sudo -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createuser ${cfg.database.user}
+              ${pkgs.sudo}/bin/sudo -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createdb -O ${cfg.database.user} ${cfg.database.name}
+              touch ${cfg.root}/.db-created
+            fi
+          ''}
+
           exists=$(${callSql "select count(*) > 0 from pg_tables where tableowner = user"} \
           | tail -n+3 | head -n-2 | sed -e 's/[ \n\t]*//')
 
@@ -554,5 +564,28 @@ let
         requires = ["${dbService}"];
         after = ["network.target" "${dbService}"];
     };
+
+    services.mysql = optionalAttrs (cfg.database.type == "mysql") {
+      enable = true;
+      package = mkDefault pkgs.mysql;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        {
+          name = cfg.user;
+          ensurePermissions = {
+            "${cfg.database.name}.*" = "ALL PRIVILEGES";
+          };
+        }
+      ];
+    };
+
+    services.postgresql = optionalAttrs (cfg.database.type == "pgsql") {
+      enable = mkDefault true;
+    };
+
+    users = optionalAttrs (cfg.user == "tt_rss") {
+      extraUsers.tt_rss.group = "tt_rss";
+      extraGroups.tt_rss = {};
+    };
   };
 }
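
A short sketch of how the changed defaults fit together, assuming the module's other options keep their defaults: the dedicated tt_rss user and group are created, PostgreSQL is enabled via mkDefault, and the createuser/createdb bootstrap above runs because the database host and password stay unset. The selfUrlPath option is assumed to exist and be required; it does not appear in these hunks.

{ config, pkgs, ... }:
{
  services.tt-rss = {
    enable = true;
    virtualHost = "rss.example.org";           # generates the nginx vhost defined above
    selfUrlPath = "https://rss.example.org/";  # assumed option, not shown in this diff
    database.type = "pgsql";                   # host/password left at null, so the
                                               # .db-created bootstrap above takes effect
  };
}
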
diff --git a/nixos/modules/services/web-servers/apache-httpd/owncloud.nix b/nixos/modules/services/web-servers/apache-httpd/owncloud.nix
index 94e85f1f4289..82b8bf3e30db 100644
--- a/nixos/modules/services/web-servers/apache-httpd/owncloud.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/owncloud.nix
@@ -188,8 +188,7 @@ let
       /* date format to be used while writing to the owncloud logfile */
       'logdateformat' => 'F d, Y H:i:s',
 
-      /* timezone used while writing to the owncloud logfile (default: UTC) */
-      'logtimezone' => '${serverInfo.fullConfig.time.timeZone}',
+      ${tzSetting}
 
       /* Append all database queries and parameters to the log file.
        (watch out, this option can increase the size of your log file)*/
@@ -339,6 +338,31 @@ let
 
     '';
 
+  tzSetting = let tz = serverInfo.fullConfig.time.timeZone; in optionalString (!isNull tz) ''
+    /* timezone used while writing to the owncloud logfile (default: UTC) */
+    'logtimezone' => '${tz}',
+  '';
+
+  postgresql = serverInfo.fullConfig.services.postgresql.package;
+
+  setupDb = pkgs.writeScript "setup-owncloud-db" ''
+    #!${pkgs.runtimeShell}
+    PATH="${postgresql}/bin"
+    createuser --no-superuser --no-createdb --no-createrole "${config.dbUser}" || true
+    createdb "${config.dbName}" -O "${config.dbUser}" || true
+    psql -U postgres -d postgres -c "alter user ${config.dbUser} with password '${config.dbPassword}';" || true
+
+    QUERY="CREATE TABLE appconfig
+             ( appid       VARCHAR( 255 ) NOT NULL
+             , configkey   VARCHAR( 255 ) NOT NULL
+             , configvalue VARCHAR( 255 ) NOT NULL
+             );
+           GRANT ALL ON appconfig TO ${config.dbUser};
+           ALTER TABLE appconfig OWNER TO ${config.dbUser};"
+
+    psql -h "/tmp" -U postgres -d ${config.dbName} -Atw -c "$QUERY" || true
+  '';
+
 in
 
 rec {
@@ -353,7 +377,7 @@ rec {
       ''}
 
       <Directory ${config.package}>
-        ${builtins.readFile "${config.package}/.htaccess"}
+        Include ${config.package}/.htaccess
       </Directory>
     '';
 
@@ -373,7 +397,7 @@ rec {
       defaultText = "pkgs.owncloud70";
       example = literalExample "pkgs.owncloud70";
       description = ''
-          PostgreSQL package to use.
+          ownCloud package to use.
       '';
     };
 
@@ -574,13 +598,7 @@ rec {
       chmod -R o-rwx ${config.dataDir}
       chown -R wwwrun:wwwrun ${config.dataDir}
 
-      ${pkgs.postgresql}/bin/createuser -s -r postgres
-      ${pkgs.postgresql}/bin/createuser --no-superuser --no-createdb --no-createrole "${config.dbUser}" || true
-      ${pkgs.postgresql}/bin/createdb "${config.dbName}" -O "${config.dbUser}" || true
-      ${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/psql -U postgres -d postgres -c "alter user ${config.dbUser} with password '${config.dbPassword}';" || true
-
-      QUERY="CREATE TABLE appconfig (appid VARCHAR( 255 ) NOT NULL ,configkey VARCHAR( 255 ) NOT NULL ,configvalue VARCHAR( 255 ) NOT NULL); GRANT ALL ON appconfig TO ${config.dbUser}; ALTER TABLE appconfig OWNER TO ${config.dbUser};"
-      ${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/psql -h "/tmp" -U postgres -d ${config.dbName} -Atw -c "$QUERY" || true
+      ${pkgs.sudo}/bin/sudo -u postgres ${setupDb}
     fi
 
     if [ -e ${config.package}/config/ca-bundle.crt ]; then
@@ -591,7 +609,11 @@ rec {
 
     chown wwwrun:wwwrun ${config.dataDir}/owncloud.log || true
 
-    QUERY="INSERT INTO groups (gid) values('admin'); INSERT INTO users (uid,password) values('${config.adminUser}','${builtins.hashString "sha1" config.adminPassword}'); INSERT INTO group_user (gid,uid) values('admin','${config.adminUser}');"
-    ${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/psql -h "/tmp" -U postgres -d ${config.dbName} -Atw -c "$QUERY" || true
+    QUERY="INSERT INTO groups (gid) values('admin');
+           INSERT INTO users (uid,password)
+             values('${config.adminUser}','${builtins.hashString "sha1" config.adminPassword}');
+           INSERT INTO group_user (gid,uid)
+             values('admin','${config.adminUser}');"
+    ${pkgs.sudo}/bin/sudo -u postgres ${postgresql}/bin/psql -h "/tmp" -U postgres -d ${config.dbName} -Atw -c "$QUERY" || true
   '';
 }
diff --git a/nixos/modules/services/web-servers/apache-httpd/per-server-options.nix b/nixos/modules/services/web-servers/apache-httpd/per-server-options.nix
index 1d53ce659005..4bbd041b6e04 100644
--- a/nixos/modules/services/web-servers/apache-httpd/per-server-options.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/per-server-options.nix
@@ -118,7 +118,7 @@ with lib;
     default = [];
     example = [
       { urlPath = "/foo/bar.png";
-        files = "/home/eelco/some-file.png";
+        file = "/home/eelco/some-file.png";
       }
     ];
     description = ''
diff --git a/nixos/modules/services/web-servers/lighttpd/default.nix b/nixos/modules/services/web-servers/lighttpd/default.nix
index 700b4469c565..d23e810dcc62 100644
--- a/nixos/modules/services/web-servers/lighttpd/default.nix
+++ b/nixos/modules/services/web-servers/lighttpd/default.nix
@@ -50,11 +50,14 @@ let
     "mod_geoip"
     "mod_magnet"
     "mod_mysql_vhost"
+    "mod_openssl"  # since v1.4.46
     "mod_scgi"
     "mod_setenv"
     "mod_trigger_b4_dl"
     "mod_uploadprogress"
+    "mod_vhostdb"  # since v1.4.46
     "mod_webdav"
+    "mod_wstunnel"  # since v1.4.46
   ];
 
   maybeModuleString = moduleName:
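
The three added entries only extend the whitelist of module names the NixOS option accepts; loading one of them still goes through enableModules. A hedged sketch with a placeholder certificate path:

{ config, pkgs, ... }:
{
  services.lighttpd = {
    enable = true;
    enableModules = [ "mod_openssl" ];  # accepted by the whitelist as of this change
    extraConfig = ''
      $SERVER["socket"] == ":443" {
        ssl.engine  = "enable"
        ssl.pemfile = "/var/lib/acme/example.org/full.pem"  # placeholder path
      }
    '';
  };
}
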
diff --git a/nixos/modules/services/web-servers/lighttpd/gitweb.nix b/nixos/modules/services/web-servers/lighttpd/gitweb.nix
index c8d9836b0b68..c494d6966a7f 100644
--- a/nixos/modules/services/web-servers/lighttpd/gitweb.nix
+++ b/nixos/modules/services/web-servers/lighttpd/gitweb.nix
@@ -3,12 +3,10 @@
 with lib;
 
 let
-  cfg = config.services.lighttpd.gitweb;
-  gitwebConfigFile = pkgs.writeText "gitweb.conf" ''
-    # path to git projects (<project>.git)
-    $projectroot = "${cfg.projectroot}";
-    ${cfg.extraConfig}
-  '';
+  cfg = config.services.gitweb;
+  package = pkgs.gitweb.override (optionalAttrs cfg.gitwebTheme {
+    gitwebTheme = true;
+  });
 
 in
 {
@@ -23,26 +21,9 @@ in
       '';
     };
 
-    projectroot = mkOption {
-      default = "/srv/git";
-      type = types.path;
-      description = ''
-        Path to git projects (bare repositories) that should be served by
-        gitweb. Must not end with a slash.
-      '';
-    };
-
-    extraConfig = mkOption {
-      default = "";
-      type = types.lines;
-      description = ''
-        Verbatim configuration text appended to the generated gitweb.conf file.
-      '';
-    };
-
   };
 
-  config = mkIf cfg.enable {
+  config = mkIf config.services.lighttpd.gitweb.enable {
 
     # declare module dependencies
     services.lighttpd.enableModules = [ "mod_cgi" "mod_redirect" "mod_alias" "mod_setenv" ];
@@ -56,11 +37,11 @@ in
               "^/gitweb$" => "/gitweb/"
           )
           alias.url = (
-              "/gitweb/static/" => "${pkgs.git}/share/gitweb/static/",
-              "/gitweb/"        => "${pkgs.git}/share/gitweb/gitweb.cgi"
+              "/gitweb/static/" => "${package}/static/",
+              "/gitweb/"        => "${package}/gitweb.cgi"
           )
           setenv.add-environment = (
-              "GITWEB_CONFIG" => "${gitwebConfigFile}",
+              "GITWEB_CONFIG" => "${cfg.gitwebConfigFile}",
               "HOME" => "${cfg.projectroot}"
           )
       }
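
With projectroot and extraConfig moved out of services.lighttpd.gitweb, a configuration now splits into the shared services.gitweb namespace (referenced as cfg.* above) and the per-webserver enable switch. A minimal sketch, assuming services.gitweb exposes projectroot and gitwebTheme as the references above suggest:

{ config, pkgs, ... }:
{
  services.gitweb = {
    projectroot = "/srv/git";  # bare repositories served at /gitweb/
    gitwebTheme = true;        # selects the themed pkgs.gitweb override used above
  };
  services.lighttpd = {
    enable = true;
    gitweb.enable = true;
  };
}
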
diff --git a/nixos/modules/services/web-servers/lighttpd/inginious.nix b/nixos/modules/services/web-servers/lighttpd/inginious.nix
index 669e81d0f14b..8c813d116a52 100644
--- a/nixos/modules/services/web-servers/lighttpd/inginious.nix
+++ b/nixos/modules/services/web-servers/lighttpd/inginious.nix
@@ -113,7 +113,6 @@ in
 
     tasksDirectory = mkOption {
       type = types.path;
-      default = "${inginious}/lib/python2.7/site-packages/inginious/tasks";
       example = "/var/lib/INGInious/tasks";
       description = ''
         Path to the tasks folder.
@@ -218,6 +217,7 @@ in
 
       # Common
       {
+        services.lighttpd.inginious.tasksDirectory = mkDefault "${inginious}/lib/python2.7/site-packages/inginious/tasks";
         # To access inginous tools (like inginious-test-task)
         environment.systemPackages = [ inginious ];
 
diff --git a/nixos/modules/services/web-servers/mighttpd2.nix b/nixos/modules/services/web-servers/mighttpd2.nix
new file mode 100644
index 000000000000..a888f623616e
--- /dev/null
+++ b/nixos/modules/services/web-servers/mighttpd2.nix
@@ -0,0 +1,132 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mighttpd2;
+  configFile = pkgs.writeText "mighty-config" cfg.config;
+  routingFile = pkgs.writeText "mighty-routing" cfg.routing;
+in {
+  options.services.mighttpd2 = {
+    enable = mkEnableOption "Mighttpd2 web server";
+
+    config = mkOption {
+      default = "";
+      example = ''
+        # Example configuration for Mighttpd 2
+        Port: 80
+        # IP address or "*"
+        Host: *
+        Debug_Mode: Yes # Yes or No
+        # If available, "nobody" is much more secure for User:.
+        User: root
+        # If available, "nobody" is much more secure for Group:.
+        Group: root
+        Pid_File: /var/run/mighty.pid
+        Logging: Yes # Yes or No
+        Log_File: /var/log/mighty # The directory must be writable by User:
+        Log_File_Size: 16777216 # bytes
+        Log_Backup_Number: 10
+        Index_File: index.html
+        Index_Cgi: index.cgi
+        Status_File_Dir: /usr/local/share/mighty/status
+        Connection_Timeout: 30 # seconds
+        Fd_Cache_Duration: 10 # seconds
+        # Server_Name: Mighttpd/3.x.y
+        Tls_Port: 443
+        Tls_Cert_File: cert.pem # should change this with an absolute path
+        # should change this with comma-separated absolute paths
+        Tls_Chain_Files: chain.pem
+        # Currently, Tls_Key_File must not be encrypted.
+        Tls_Key_File: privkey.pem # should change this with an absolute path
+        Service: 0 # 0 is HTTP only, 1 is HTTPS only, 2 is both
+      '';
+      type = types.lines;
+      description = ''
+        Verbatim config file to use
+        (see http://www.mew.org/~kazu/proj/mighttpd/en/config.html)
+      '';
+    };
+
+    routing = mkOption {
+      default = "";
+      example = ''
+        # Example routing for Mighttpd 2
+
+        # Domain lists
+        [localhost www.example.com]
+
+        # Entries are looked up in the specified order
+        # All paths must end with "/"
+
+        # A path to CGI scripts should be specified with "=>"
+        /~alice/cgi-bin/ => /home/alice/public_html/cgi-bin/
+
+        # A path to static files should be specified with "->"
+        /~alice/         -> /home/alice/public_html/
+        /cgi-bin/        => /export/cgi-bin/
+
+        # Reverse proxy rules should be specified with ">>"
+        # /path >> host:port/path2
+        # Either "host" or ":port" can be committed, but not both.
+        /app/cal/        >> example.net/calendar/
+        # Yesod app in the same server
+        /app/wiki/       >> 127.0.0.1:3000/
+
+        /                -> /export/www/
+      '';
+      type = types.lines;
+      description = ''
+        Verbatim routing file to use
+        (see http://www.mew.org/~kazu/proj/mighttpd/en/config.html)
+      '';
+    };
+
+    cores = mkOption {
+      default = null;
+      type = types.nullOr types.int;
+      description = ''
+        How many cores to use.
+        If null, it will be determined automatically.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    assertions =
+      [ { assertion = cfg.routing != "";
+          message = "You need at least one rule in mighttpd2.routing";
+        }
+      ];
+    systemd.services.mighttpd2 = {
+      description = "Mighttpd2 web server";
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.haskellPackages.mighttpd2}/bin/mighty \
+            ${configFile} \
+            ${routingFile} \
+            +RTS -N${optionalString (cfg.cores != null) (toString cfg.cores)}
+        '';
+        Type = "simple";
+        User = "mighttpd2";
+        Group = "mighttpd2";
+        Restart = "on-failure";
+        AmbientCapabilities = "cap_net_bind_service";
+        CapabilityBoundingSet = "cap_net_bind_service";
+      };
+    };
+
+    users.extraUsers.mighttpd2 = {
+      group = "mighttpd2";
+      uid = config.ids.uids.mighttpd2;
+      isSystemUser = true;
+    };
+
+    users.extraGroups.mighttpd2.gid = config.ids.gids.mighttpd2;
+  };
+
+  meta.maintainers = with lib.maintainers; [ fgaz ];
+}
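
A hedged usage sketch for the new module; whether mighty accepts a config file this short is not verified here, so the full key set from the option's example may be needed in practice. The routing block satisfies the assertion that at least one rule exists.

{ config, pkgs, ... }:
{
  services.mighttpd2 = {
    enable = true;
    cores = 2;          # rendered as +RTS -N2 in ExecStart
    config = ''
      Port: 8080
      Host: *
      Debug_Mode: No
      Logging: No
      Index_File: index.html
      Service: 0
    '';
    routing = ''
      [localhost]
      /  -> /var/www/
    '';
  };
}
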
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 97511aac9737..938a8a1fe334 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -9,12 +9,16 @@ let
       serverName = if vhostConfig.serverName != null
         then vhostConfig.serverName
         else vhostName;
+      acmeDirectory = config.security.acme.directory;
     in
     vhostConfig // {
       inherit serverName;
     } // (optionalAttrs vhostConfig.enableACME {
-      sslCertificate = "/var/lib/acme/${serverName}/fullchain.pem";
-      sslCertificateKey = "/var/lib/acme/${serverName}/key.pem";
+      sslCertificate = "${acmeDirectory}/${serverName}/fullchain.pem";
+      sslCertificateKey = "${acmeDirectory}/${serverName}/key.pem";
+    }) // (optionalAttrs (vhostConfig.useACMEHost != null) {
+      sslCertificate = "${acmeDirectory}/${vhostConfig.useACMEHost}/fullchain.pem";
+      sslCertificateKey = "${acmeDirectory}/${vhostConfig.useACMEHost}/key.pem";
     })
   ) cfg.virtualHosts;
   enableIPv6 = config.networking.enableIPv6;
@@ -167,13 +171,14 @@ let
 
         listenString = { addr, port, ssl, ... }:
           "listen ${addr}:${toString port} "
-          + optionalString ssl "ssl http2 "
+          + optionalString ssl "ssl "
+          + optionalString (ssl && vhost.http2) "http2 "
           + optionalString vhost.default "default_server "
           + ";";
 
         redirectListen = filter (x: !x.ssl) defaultListen;
 
-        acmeLocation = ''
+        acmeLocation = optionalString (vhost.enableACME || vhost.useACMEHost != null) ''
           location /.well-known/acme-challenge {
             ${optionalString (vhost.acmeFallbackHost != null) "try_files $uri @acme-fallback;"}
             root ${vhost.acmeRoot};
@@ -193,7 +198,7 @@ let
             ${concatMapStringsSep "\n" listenString redirectListen}
 
             server_name ${vhost.serverName} ${concatStringsSep " " vhost.serverAliases};
-            ${optionalString vhost.enableACME acmeLocation}
+            ${acmeLocation}
             location / {
               return 301 https://$host$request_uri;
             }
@@ -203,7 +208,7 @@ let
         server {
           ${concatMapStringsSep "\n" listenString hostListen}
           server_name ${vhost.serverName} ${concatStringsSep " " vhost.serverAliases};
-          ${optionalString vhost.enableACME acmeLocation}
+          ${acmeLocation}
           ${optionalString (vhost.root != null) "root ${vhost.root};"}
           ${optionalString (vhost.globalRedirect != null) ''
             return 301 http${optionalString hasSSL "s"}://${vhost.globalRedirect}$request_uri;
@@ -554,6 +559,14 @@ in
           are mutually exclusive.
         '';
       }
+
+      {
+        assertion = all (conf: !(conf.enableACME && conf.useACMEHost != null)) (attrValues virtualHosts);
+        message = ''
+          Options services.nginx.service.virtualHosts.<name>.enableACME and
+          services.nginx.virtualHosts.<name>.useACMEHost are mutually exclusive.
+        '';
+      }
     ];
 
     systemd.services.nginx = {
@@ -566,6 +579,7 @@ in
         mkdir -p ${cfg.stateDir}/logs
         chmod 700 ${cfg.stateDir}
         chown -R ${cfg.user}:${cfg.group} ${cfg.stateDir}
+        ${cfg.package}/bin/nginx -c ${configFile} -p ${cfg.stateDir} -t
         '';
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/nginx -c ${configFile} -p ${cfg.stateDir}";
@@ -579,7 +593,7 @@ in
     security.acme.certs = filterAttrs (n: v: v != {}) (
       let
         vhostsConfigs = mapAttrsToList (vhostName: vhostConfig: vhostConfig) virtualHosts;
-        acmeEnabledVhosts = filter (vhostConfig: vhostConfig.enableACME) vhostsConfigs;
+        acmeEnabledVhosts = filter (vhostConfig: vhostConfig.enableACME && vhostConfig.useACMEHost == null) vhostsConfigs;
         acmePairs = map (vhostConfig: { name = vhostConfig.serverName; value = {
             user = cfg.user;
             group = lib.mkDefault cfg.group;
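
The new useACMEHost plumbing can be illustrated with two virtual hosts: one obtains a certificate through enableACME (and therefore gets a security.acme.certs entry from the block above), the other only points its sslCertificate paths at that host's directory. This assumes the certificate actually covers the second name, for example via the ACME module's extraDomains, which is not part of this diff.

{ config, pkgs, ... }:
{
  services.nginx = {
    enable = true;
    virtualHosts = {
      "example.org" = {
        enableACME = true;   # requests the certificate and creates the acme cert entry
        forceSSL = true;
        root = "/var/www/example.org";
      };
      "static.example.org" = {
        useACMEHost = "example.org";  # reuses example.org's fullchain.pem/key.pem
        forceSSL = true;
        root = "/var/www/static";
      };
    };
  };
}
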
diff --git a/nixos/modules/services/web-servers/nginx/gitweb.nix b/nixos/modules/services/web-servers/nginx/gitweb.nix
new file mode 100644
index 000000000000..272fd1480185
--- /dev/null
+++ b/nixos/modules/services/web-servers/nginx/gitweb.nix
@@ -0,0 +1,61 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gitweb;
+  package = pkgs.gitweb.override (optionalAttrs cfg.gitwebTheme {
+    gitwebTheme = true;
+  });
+
+in
+{
+
+  options.services.nginx.gitweb = {
+
+    enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = ''
+        If true, enable gitweb in nginx. Access it at http://yourserver/gitweb
+      '';
+    };
+
+  };
+
+  config = mkIf config.services.nginx.gitweb.enable {
+
+    systemd.services.gitweb = {
+      description = "GitWeb service";
+      script = "${package}/gitweb.cgi --fastcgi --nproc=1";
+      environment  = {
+        FCGI_SOCKET_PATH = "/run/gitweb/gitweb.sock";
+      };
+      serviceConfig = {
+        User = "nginx";
+        Group = "nginx";
+        RuntimeDirectory = [ "gitweb" ];
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    services.nginx = {
+      virtualHosts.default = {
+        locations."/gitweb/static/" = {
+          alias = "${package}/static/";
+        };
+        locations."/gitweb/" = {
+          extraConfig = ''
+            include ${pkgs.nginx}/conf/fastcgi_params;
+            fastcgi_param GITWEB_CONFIG ${cfg.gitwebConfigFile};
+            fastcgi_pass unix:/run/gitweb/gitweb.sock;
+          '';
+        };
+      };
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ gnidorah ];
+
+}
diff --git a/nixos/modules/services/web-servers/nginx/vhost-options.nix b/nixos/modules/services/web-servers/nginx/vhost-options.nix
index 801601aafd9d..bf18108a1a3c 100644
--- a/nixos/modules/services/web-servers/nginx/vhost-options.nix
+++ b/nixos/modules/services/web-servers/nginx/vhost-options.nix
@@ -48,7 +48,21 @@ with lib;
     enableACME = mkOption {
       type = types.bool;
       default = false;
-      description = "Whether to ask Let's Encrypt to sign a certificate for this vhost.";
+      description = ''
+        Whether to ask Let's Encrypt to sign a certificate for this vhost.
+        Alternately, you can use an existing certificate through <option>useACMEHost</option>.
+      '';
+    };
+
+    useACMEHost = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = ''
+        A host of an existing Let's Encrypt certificate to use.
+        This is useful if you have many subdomains and want to avoid hitting the
+        <link xlink:href="https://letsencrypt.org/docs/rate-limits/">rate limit</link>.
+        Alternately, you can generate a certificate through <option>enableACME</option>.
+      '';
     };
 
     acmeRoot = mkOption {
@@ -114,6 +128,20 @@ with lib;
       description = "Path to server SSL certificate key.";
     };
 
+    http2 = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Whether to enable HTTP 2.
+        Note that (as of writing) due to nginx's implementation, to disable
+        HTTP 2 you have to disable it on all vhosts that use a given
+        IP address / port.
+        If there is one server block configured to enable http2, then it is
+        enabled for all server blocks on this IP.
+        See https://stackoverflow.com/a/39466948/263061.
+      '';
+    };
+
     root = mkOption {
       type = types.nullOr types.path;
       default = null;
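
Since http2 defaults to true, existing configurations keep emitting "listen ... ssl http2"; setting the option to false drops the http2 token from listenString above. A small sketch with placeholder certificate paths; note the caveat in the description that this effectively affects every vhost sharing the same address and port.

{ config, pkgs, ... }:
{
  services.nginx = {
    enable = true;
    virtualHosts."legacy.example.org" = {
      forceSSL = true;
      sslCertificate = "/var/lib/secrets/legacy.crt";     # placeholder path
      sslCertificateKey = "/var/lib/secrets/legacy.key";  # placeholder path
      http2 = false;  # emits "listen ... ssl;" without http2
    };
  };
}
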
diff --git a/nixos/modules/services/web-servers/tomcat.nix b/nixos/modules/services/web-servers/tomcat.nix
index 943415e08c64..aa94e0e976c9 100644
--- a/nixos/modules/services/web-servers/tomcat.nix
+++ b/nixos/modules/services/web-servers/tomcat.nix
@@ -19,27 +19,43 @@ in
   options = {
 
     services.tomcat = {
-
-      enable = mkOption {
-        default = false;
-        description = "Whether to enable Apache Tomcat";
-      };
+      enable = mkEnableOption "Apache Tomcat";
 
       package = mkOption {
         type = types.package;
         default = pkgs.tomcat85;
         defaultText = "pkgs.tomcat85";
-        example = lib.literalExample "pkgs.tomcatUnstable";
+        example = lib.literalExample "pkgs.tomcat9";
         description = ''
           Which tomcat package to use.
         '';
       };
 
       baseDir = mkOption {
+        type = lib.types.path;
         default = "/var/tomcat";
         description = "Location where Tomcat stores configuration files, webapplications and logfiles";
       };
 
+      logDirs = mkOption {
+        default = [];
+        type = types.listOf types.path;
+        description = "Directories to create in baseDir/logs/";
+      };
+
+      extraConfigFiles = mkOption {
+        default = [];
+        type = types.listOf types.path;
+        description = "Extra configuration files to pull into the tomcat conf directory";
+      };
+
+      extraEnvironment = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "ENVIRONMENT=production" ];
+        description = "Environment Variables to pass to the tomcat service";
+      };
+
       extraGroups = mkOption {
         default = [];
         example = [ "users" ];
@@ -47,31 +63,46 @@ in
       };
 
       user = mkOption {
+        type = types.str;
         default = "tomcat";
         description = "User account under which Apache Tomcat runs.";
       };
 
       group = mkOption {
+        type = types.str;
         default = "tomcat";
         description = "Group account under which Apache Tomcat runs.";
       };
 
       javaOpts = mkOption {
+        type = types.either (types.listOf types.str) types.str;
         default = "";
         description = "Parameters to pass to the Java Virtual Machine which spawns Apache Tomcat";
       };
 
       catalinaOpts = mkOption {
+        type = types.either (types.listOf types.str) types.str;
         default = "";
         description = "Parameters to pass to the Java Virtual Machine which spawns the Catalina servlet container";
       };
 
       sharedLibs = mkOption {
+        type = types.listOf types.str;
         default = [];
         description = "List containing JAR files or directories with JAR files which are libraries shared by the web applications";
       };
 
+      serverXml = mkOption {
+        type = types.lines;
+        default = "";
+        description = "
+          Verbatim server.xml configuration.
+          This is mutually exclusive with the virtualHosts options.
+        ";
+      };
+
       commonLibs = mkOption {
+        type = types.listOf types.str;
         default = [];
         description = "List containing JAR files or directories with JAR files which are libraries shared by the web applications and the servlet container";
       };
@@ -84,11 +115,21 @@ in
       };
 
       virtualHosts = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+            name = mkOption {
+              type = types.str;
+              description = "Name of the virtual host.";
+            };
+            webapps = mkOption {
+              type = types.listOf types.path;
+              default = [];
+              description = "List of web applications to deploy on this virtual host.";
+            };
+          };
+        });
         default = [];
         description = "List consisting of a virtual host name and a list of web applications to deploy on each virtual host";
       };
 
       logPerVirtualHost = mkOption {
+        type = types.bool;
         default = false;
         description = "Whether to enable logging per virtual host.";
       };
@@ -104,11 +145,13 @@ in
 
         enable = mkOption {
           default = false;
+          type = types.bool;
           description = "Whether to enable an Apache Axis2 container";
         };
 
         services = mkOption {
           default = [];
+          type = types.listOf types.str;
           description = "List containing AAR files or directories with AAR files which are web services to be deployed on Axis2";
         };
 
@@ -140,130 +183,104 @@ in
       description = "Apache Tomcat server";
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
-      serviceConfig.Type = "oneshot";
-      serviceConfig.RemainAfterExit = true;
 
       preStart = ''
         # Create the base directory
-        mkdir -p ${cfg.baseDir}
+        mkdir -p \
+          ${cfg.baseDir}/{conf,virtualhosts,logs,temp,lib,shared/lib,webapps,work}
+        chown ${cfg.user}:${cfg.group} \
+          ${cfg.baseDir}/{conf,virtualhosts,logs,temp,lib,shared/lib,webapps,work}
 
         # Create a symlink to the bin directory of the tomcat component
         ln -sfn ${tomcat}/bin ${cfg.baseDir}/bin
 
-        # Create a conf/ directory
-        mkdir -p ${cfg.baseDir}/conf
-        chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/conf
-
         # Symlink the config files in the conf/ directory (except for catalina.properties and server.xml)
-        for i in $(ls ${tomcat}/conf | grep -v catalina.properties | grep -v server.xml)
-        do
-            ln -sfn ${tomcat}/conf/$i ${cfg.baseDir}/conf/`basename $i`
+        for i in $(ls ${tomcat}/conf | grep -v catalina.properties | grep -v server.xml); do
+          ln -sfn ${tomcat}/conf/$i ${cfg.baseDir}/conf/`basename $i`
         done
 
-        # Create subdirectory for virtual hosts
-        mkdir -p ${cfg.baseDir}/virtualhosts
+        ${optionalString (cfg.extraConfigFiles != []) ''
+          for i in ${toString cfg.extraConfigFiles}; do
+            ln -sfn $i ${cfg.baseDir}/conf/`basename $i`
+          done
+        ''}
 
         # Create a modified catalina.properties file
         # Change all references from CATALINA_HOME to CATALINA_BASE and add support for shared libraries
         sed -e 's|''${catalina.home}|''${catalina.base}|g' \
-            -e 's|shared.loader=|shared.loader=''${catalina.base}/shared/lib/*.jar|' \
-            ${tomcat}/conf/catalina.properties > ${cfg.baseDir}/conf/catalina.properties
-
-        # Create a modified server.xml which also includes all virtual hosts
-        sed -e "/<Engine name=\"Catalina\" defaultHost=\"localhost\">/a\  ${
-                     toString (map (virtualHost: ''<Host name=\"${virtualHost.name}\" appBase=\"virtualhosts/${virtualHost.name}/webapps\" unpackWARs=\"true\" autoDeploy=\"true\" xmlValidation=\"false\" xmlNamespaceAware=\"false\" >${if cfg.logPerVirtualHost then ''<Valve className=\"org.apache.catalina.valves.AccessLogValve\" directory=\"logs/${virtualHost.name}\"  prefix=\"${virtualHost.name}_access_log.\" pattern=\"combined\" resolveHosts=\"false\"/>'' else ""}</Host>'') cfg.virtualHosts)}" \
-            ${tomcat}/conf/server.xml > ${cfg.baseDir}/conf/server.xml
-
-        # Create a logs/ directory
-        mkdir -p ${cfg.baseDir}/logs
-        chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/logs
-        ${if cfg.logPerVirtualHost then
-           toString (map (h: ''
-                                mkdir -p ${cfg.baseDir}/logs/${h.name}
-                                chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/logs/${h.name}
-                             '') cfg.virtualHosts) else ''''}
-
-        # Create a temp/ directory
-        mkdir -p ${cfg.baseDir}/temp
-        chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/temp
-
-        # Create a lib/ directory
-        mkdir -p ${cfg.baseDir}/lib
-        chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/lib
-
-        # Create a shared/lib directory
-        mkdir -p ${cfg.baseDir}/shared/lib
-        chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/shared/lib
-
-        # Create a webapps/ directory
-        mkdir -p ${cfg.baseDir}/webapps
-        chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/webapps
+          -e 's|shared.loader=|shared.loader=''${catalina.base}/shared/lib/*.jar|' \
+          ${tomcat}/conf/catalina.properties > ${cfg.baseDir}/conf/catalina.properties
+
+        ${if cfg.serverXml != "" then ''
+          cp -f ${pkgs.writeTextDir "server.xml" cfg.serverXml}/* ${cfg.baseDir}/conf/
+          '' else ''
+          # Create a modified server.xml which also includes all virtual hosts
+          sed -e "/<Engine name=\"Catalina\" defaultHost=\"localhost\">/a\  ${toString (map (virtualHost: ''<Host name=\"${virtualHost.name}\" appBase=\"virtualhosts/${virtualHost.name}/webapps\" unpackWARs=\"true\" autoDeploy=\"true\" xmlValidation=\"false\" xmlNamespaceAware=\"false\" >${if cfg.logPerVirtualHost then ''<Valve className=\"org.apache.catalina.valves.AccessLogValve\" directory=\"logs/${virtualHost.name}\"  prefix=\"${virtualHost.name}_access_log.\" pattern=\"combined\" resolveHosts=\"false\"/>'' else ""}</Host>'') cfg.virtualHosts)}" \
+                ${tomcat}/conf/server.xml > ${cfg.baseDir}/conf/server.xml
+          ''
+        }
+        ${optionalString (cfg.logDirs != []) ''
+          for i in ${toString cfg.logDirs}; do
+            mkdir -p ${cfg.baseDir}/logs/$i
+            chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/logs/$i
+          done
+        ''}
+        ${optionalString cfg.logPerVirtualHost (toString (map (h: ''
+          mkdir -p ${cfg.baseDir}/logs/${h.name}
+          chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/logs/${h.name}
+        '') cfg.virtualHosts))}
 
         # Symlink all the given common libs files or paths into the lib/ directory
-        for i in ${tomcat} ${toString cfg.commonLibs}
-        do
-            if [ -f $i ]
-            then
-                # If the given web application is a file, symlink it into the common/lib/ directory
-                ln -sfn $i ${cfg.baseDir}/lib/`basename $i`
-            elif [ -d $i ]
-            then
-                # If the given web application is a directory, then iterate over the files
-                # in the special purpose directories and symlink them into the tomcat tree
-
-                for j in $i/lib/*
-                do
-                    ln -sfn $j ${cfg.baseDir}/lib/`basename $j`
-                done
-            fi
+        for i in ${tomcat} ${toString cfg.commonLibs}; do
+          if [ -f $i ]; then
+            # If the given web application is a file, symlink it into the common/lib/ directory
+            ln -sfn $i ${cfg.baseDir}/lib/`basename $i`
+          elif [ -d $i ]; then
+            # If the given web application is a directory, then iterate over the files
+            # in the special purpose directories and symlink them into the tomcat tree
+
+            for j in $i/lib/*; do
+              ln -sfn $j ${cfg.baseDir}/lib/`basename $j`
+            done
+          fi
         done
 
         # Symlink all the given shared libs files or paths into the shared/lib/ directory
-        for i in ${toString cfg.sharedLibs}
-        do
-            if [ -f $i ]
-            then
-                # If the given web application is a file, symlink it into the common/lib/ directory
-                ln -sfn $i ${cfg.baseDir}/shared/lib/`basename $i`
-            elif [ -d $i ]
-            then
-                # If the given web application is a directory, then iterate over the files
-                # in the special purpose directories and symlink them into the tomcat tree
-
-                for j in $i/shared/lib/*
-                do
-                    ln -sfn $j ${cfg.baseDir}/shared/lib/`basename $j`
-                done
-            fi
+        for i in ${toString cfg.sharedLibs}; do
+          if [ -f $i ]; then
+            # If the given web application is a file, symlink it into the common/lib/ directory
+            ln -sfn $i ${cfg.baseDir}/shared/lib/`basename $i`
+          elif [ -d $i ]; then
+            # If the given web application is a directory, then iterate over the files
+            # in the special purpose directories and symlink them into the tomcat tree
+
+            for j in $i/shared/lib/*; do
+              ln -sfn $j ${cfg.baseDir}/shared/lib/`basename $j`
+            done
+          fi
         done
 
         # Symlink all the given web applications files or paths into the webapps/ directory
-        for i in ${toString cfg.webapps}
-        do
-            if [ -f $i ]
-            then
-                # If the given web application is a file, symlink it into the webapps/ directory
-                ln -sfn $i ${cfg.baseDir}/webapps/`basename $i`
-            elif [ -d $i ]
-            then
-                # If the given web application is a directory, then iterate over the files
-                # in the special purpose directories and symlink them into the tomcat tree
-
-                for j in $i/webapps/*
-                do
-                    ln -sfn $j ${cfg.baseDir}/webapps/`basename $j`
-                done
+        for i in ${toString cfg.webapps}; do
+          if [ -f $i ]; then
+            # If the given web application is a file, symlink it into the webapps/ directory
+            ln -sfn $i ${cfg.baseDir}/webapps/`basename $i`
+          elif [ -d $i ]; then
+            # If the given web application is a directory, then iterate over the files
+            # in the special purpose directories and symlink them into the tomcat tree
+
+            for j in $i/webapps/*; do
+              ln -sfn $j ${cfg.baseDir}/webapps/`basename $j`
+            done
 
-                # Also symlink the configuration files if they are included
-                if [ -d $i/conf/Catalina ]
-                then
-                    for j in $i/conf/Catalina/*
-                    do
-                        mkdir -p ${cfg.baseDir}/conf/Catalina/localhost
-                        ln -sfn $j ${cfg.baseDir}/conf/Catalina/localhost/`basename $j`
-                    done
-                fi
+            # Also symlink the configuration files if they are included
+            if [ -d $i/conf/Catalina ]; then
+              for j in $i/conf/Catalina/*; do
+                mkdir -p ${cfg.baseDir}/conf/Catalina/localhost
+                ln -sfn $j ${cfg.baseDir}/conf/Catalina/localhost/`basename $j`
+              done
             fi
+          fi
         done
 
         ${toString (map (virtualHost: ''
@@ -275,94 +292,79 @@ in
 
           # Symlink all the given web applications files or paths into the webapps/ directory
           # of this virtual host
-          for i in "${if virtualHost ? webapps then toString virtualHost.webapps else ""}"
-          do
-              if [ -f $i ]
-              then
-                  # If the given web application is a file, symlink it into the webapps/ directory
-                  ln -sfn $i ${cfg.baseDir}/virtualhosts/${virtualHost.name}/webapps/`basename $i`
-              elif [ -d $i ]
-              then
-                  # If the given web application is a directory, then iterate over the files
-                  # in the special purpose directories and symlink them into the tomcat tree
-
-                  for j in $i/webapps/*
-                  do
-                      ln -sfn $j ${cfg.baseDir}/virtualhosts/${virtualHost.name}/webapps/`basename $j`
-                  done
-
-                  # Also symlink the configuration files if they are included
-                  if [ -d $i/conf/Catalina ]
-                  then
-                      for j in $i/conf/Catalina/*
-                      do
-                          mkdir -p ${cfg.baseDir}/conf/Catalina/${virtualHost.name}
-                          ln -sfn $j ${cfg.baseDir}/conf/Catalina/${virtualHost.name}/`basename $j`
-                      done
-                  fi
+          for i in "${if virtualHost ? webapps then toString virtualHost.webapps else ""}"; do
+            if [ -f $i ]; then
+              # If the given web application is a file, symlink it into the webapps/ directory
+              ln -sfn $i ${cfg.baseDir}/virtualhosts/${virtualHost.name}/webapps/`basename $i`
+            elif [ -d $i ]; then
+              # If the given web application is a directory, then iterate over the files
+              # in the special purpose directories and symlink them into the tomcat tree
+
+              for j in $i/webapps/*; do
+                ln -sfn $j ${cfg.baseDir}/virtualhosts/${virtualHost.name}/webapps/`basename $j`
+              done
+
+              # Also symlink the configuration files if they are included
+              if [ -d $i/conf/Catalina ]; then
+                for j in $i/conf/Catalina/*; do
+                  mkdir -p ${cfg.baseDir}/conf/Catalina/${virtualHost.name}
+                  ln -sfn $j ${cfg.baseDir}/conf/Catalina/${virtualHost.name}/`basename $j`
+                done
               fi
+            fi
           done
-
-          ''
-        ) cfg.virtualHosts) }
-
-        # Create a work/ directory
-        mkdir -p ${cfg.baseDir}/work
-        chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/work
-
-        ${if cfg.axis2.enable then
-            ''
-            # Copy the Axis2 web application
-            cp -av ${pkgs.axis2}/webapps/axis2 ${cfg.baseDir}/webapps
-
-            # Turn off addressing, which causes many errors
-            sed -i -e 's%<module ref="addressing"/>%<!-- <module ref="addressing"/> -->%' ${cfg.baseDir}/webapps/axis2/WEB-INF/conf/axis2.xml
-
-            # Modify permissions on the Axis2 application
-            chown -R ${cfg.user}:${cfg.group} ${cfg.baseDir}/webapps/axis2
-
-            # Symlink all the given web service files or paths into the webapps/axis2/WEB-INF/services directory
-            for i in ${toString cfg.axis2.services}
-            do
-                if [ -f $i ]
-                then
-                    # If the given web service is a file, symlink it into the webapps/axis2/WEB-INF/services
-                    ln -sfn $i ${cfg.baseDir}/webapps/axis2/WEB-INF/services/`basename $i`
-                elif [ -d $i ]
-                then
-                    # If the given web application is a directory, then iterate over the files
-                    # in the special purpose directories and symlink them into the tomcat tree
-
-                    for j in $i/webapps/axis2/WEB-INF/services/*
-                    do
-                        ln -sfn $j ${cfg.baseDir}/webapps/axis2/WEB-INF/services/`basename $j`
-                    done
-
-                    # Also symlink the configuration files if they are included
-                    if [ -d $i/conf/Catalina ]
-                    then
-                        for j in $i/conf/Catalina/*
-                        do
-                            ln -sfn $j ${cfg.baseDir}/conf/Catalina/localhost/`basename $j`
-                        done
-                    fi
-                fi
-            done
-            ''
-        else ""}
-      '';
-
-      script = ''
-          ${pkgs.su}/bin/su -s ${pkgs.bash}/bin/sh ${cfg.user} -c 'CATALINA_BASE=${cfg.baseDir} JAVA_HOME=${cfg.jdk} JAVA_OPTS="${cfg.javaOpts}" CATALINA_OPTS="${cfg.catalinaOpts}" ${tomcat}/bin/startup.sh'
-      '';
-
-      preStop = ''
-        echo "Stopping tomcat..."
-        CATALINA_BASE=${cfg.baseDir} JAVA_HOME=${cfg.jdk} ${pkgs.su}/bin/su -s ${pkgs.bash}/bin/sh ${cfg.user} -c ${tomcat}/bin/shutdown.sh
+        '') cfg.virtualHosts)}
+
+        ${optionalString cfg.axis2.enable ''
+          # Copy the Axis2 web application
+          cp -av ${pkgs.axis2}/webapps/axis2 ${cfg.baseDir}/webapps
+
+          # Turn off addressing, which causes many errors
+          sed -i -e 's%<module ref="addressing"/>%<!-- <module ref="addressing"/> -->%' ${cfg.baseDir}/webapps/axis2/WEB-INF/conf/axis2.xml
+
+          # Modify permissions on the Axis2 application
+          chown -R ${cfg.user}:${cfg.group} ${cfg.baseDir}/webapps/axis2
+
+          # Symlink all the given web service files or paths into the webapps/axis2/WEB-INF/services directory
+          for i in ${toString cfg.axis2.services}; do
+            if [ -f $i ]; then
+              # If the given web service is a file, symlink it into the webapps/axis2/WEB-INF/services
+              ln -sfn $i ${cfg.baseDir}/webapps/axis2/WEB-INF/services/`basename $i`
+            elif [ -d $i ]; then
+              # If the given web application is a directory, then iterate over the files
+              # in the special purpose directories and symlink them into the tomcat tree
+
+              for j in $i/webapps/axis2/WEB-INF/services/*; do
+                ln -sfn $j ${cfg.baseDir}/webapps/axis2/WEB-INF/services/`basename $j`
+              done
+
+              # Also symlink the configuration files if they are included
+              if [ -d $i/conf/Catalina ]; then
+                for j in $i/conf/Catalina/*; do
+                  ln -sfn $j ${cfg.baseDir}/conf/Catalina/localhost/`basename $j`
+                done
+              fi
+            fi
+          done
+        ''}
       '';
 
+      serviceConfig = {
+        Type = "forking";
+        PermissionsStartOnly = true;
+        PIDFile = "/run/tomcat/tomcat.pid";
+        RuntimeDirectory = "tomcat";
+        User = cfg.user;
+        Environment = [
+          "CATALINA_BASE=${cfg.baseDir}"
+          "CATALINA_PID=/run/tomcat/tomcat.pid"
+          "JAVA_HOME='${cfg.jdk}'"
+          "JAVA_OPTS='${builtins.toString cfg.javaOpts}'"
+          "CATALINA_OPTS='${builtins.toString cfg.catalinaOpts}'"
+        ] ++ cfg.extraEnvironment;
+        ExecStart = "${tomcat}/bin/startup.sh";
+        ExecStop = "${tomcat}/bin/shutdown.sh";
+      };
     };
-
   };
-
 }
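
A rough sketch of how the reworked Tomcat options could be used together; the log directory, environment value and extra config file are hypothetical. Note that the unit now runs as a forking systemd service with a PID file instead of a oneshot su wrapper.

    services.tomcat = {
      enable = true;
      extraEnvironment = [ "ENVIRONMENT=production" ];
      logDirs = [ "myapp" ];                      # created under baseDir/logs/
      extraConfigFiles = [ ./tomcat-users.xml ];  # symlinked into conf/
    };
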
diff --git a/nixos/modules/services/web-servers/traefik.nix b/nixos/modules/services/web-servers/traefik.nix
index 4ede4fc20967..b6c7fef21fb2 100644
--- a/nixos/modules/services/web-servers/traefik.nix
+++ b/nixos/modules/services/web-servers/traefik.nix
@@ -64,6 +64,16 @@ in {
       '';
     };
 
+    group = mkOption {
+      default = "traefik";
+      type = types.str;
+      example = "docker";
+      description = ''
+        Set the group that traefik runs under.
+        For the docker backend this needs to be set to <literal>docker</literal>.
+      '';
+    };
+
     package = mkOption {
       default = pkgs.traefik;
       defaultText = "pkgs.traefik";
@@ -87,7 +97,7 @@ in {
         ];
         Type = "simple";
         User = "traefik";
-        Group = "traefik";
+        Group = cfg.group;
         Restart = "on-failure";
         StartLimitInterval = 86400;
         StartLimitBurst = 5;
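
With the new group option, a configuration using the docker backend might look like this (sketch only):

    services.traefik = {
      enable = true;
      group = "docker";   # gives traefik access to the Docker socket
    };
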
diff --git a/nixos/modules/services/web-servers/varnish/default.nix b/nixos/modules/services/web-servers/varnish/default.nix
index c3bc065d4651..bc74d62b116a 100644
--- a/nixos/modules/services/web-servers/varnish/default.nix
+++ b/nixos/modules/services/web-servers/varnish/default.nix
@@ -1,14 +1,27 @@
 { config, lib, pkgs, ...}:
+
+with lib;
+
 let
   cfg = config.services.varnish;
 
+  commandLine = "-f ${pkgs.writeText "default.vcl" cfg.config}" +
+      optionalString (cfg.extraModules != []) " -p vmod_path='${makeSearchPathOutput "lib" "lib/varnish/vmods" ([cfg.package] ++ cfg.extraModules)}' -r vmod_path";
 in
-with lib;
 {
   options = {
     services.varnish = {
       enable = mkEnableOption "Varnish Server";
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.varnish5;
+        defaultText = "pkgs.varnish5";
+        description = ''
+          The Varnish package to use.
+        '';
+      };
+
       http_address = mkOption {
         type = types.str;
         default = "*:6081";
@@ -35,7 +48,7 @@ with lib;
       extraModules = mkOption {
         type = types.listOf types.package;
         default = [];
-        example = literalExample "[ pkgs.varnish-geoip ]";
+        example = literalExample "[ pkgs.varnish5Packages.geoip ]";
         description = "
           Varnish modules (except 'std').
         ";
@@ -69,8 +82,7 @@ with lib;
       serviceConfig = {
         Type = "simple";
         PermissionsStartOnly = true;
-        ExecStart = "${pkgs.varnish}/sbin/varnishd -a ${cfg.http_address} -f ${pkgs.writeText "default.vcl" cfg.config} -n ${cfg.stateDir} -F ${cfg.extraCommandLine}"
-          + optionalString (cfg.extraModules != []) " -p vmod_path='${makeSearchPathOutput "lib" "lib/varnish/vmods" ([pkgs.varnish] ++ cfg.extraModules)}' -r vmod_path";
+        ExecStart = "${cfg.package}/sbin/varnishd -a ${cfg.http_address} -n ${cfg.stateDir} -F ${cfg.extraCommandLine} ${commandLine}";
         Restart = "always";
         RestartSec = "5s";
         User = "varnish";
@@ -81,7 +93,15 @@ with lib;
       };
     };
 
-    environment.systemPackages = [ pkgs.varnish ];
+    environment.systemPackages = [ cfg.package ];
+
+    # check .vcl syntax at compile time (e.g. before nixops deployment)
+    system.extraDependencies = [
+      (pkgs.stdenv.mkDerivation {
+        name = "check-varnish-syntax";
+        buildCommand = "${cfg.package}/sbin/varnishd -C ${commandLine} 2> $out || (cat $out; exit 1)";
+      })
+    ];
 
     users.extraUsers.varnish = {
       group = "varnish";
diff --git a/nixos/modules/services/x11/compton.nix b/nixos/modules/services/x11/compton.nix
index 11cbcac6fa85..8641c05de52e 100644
--- a/nixos/modules/services/x11/compton.nix
+++ b/nixos/modules/services/x11/compton.nix
@@ -181,10 +181,10 @@ in {
     };
 
     backend = mkOption {
-      type = types.enum [ "glx" "xrender" ];
+      type = types.enum [ "glx" "xrender" "xr_glx_hybrid" ];
       default = "xrender";
       description = ''
-        Backend to use: <literal>glx</literal> or <literal>xrender</literal>.
+        Backend to use: <literal>glx</literal>, <literal>xrender</literal> or <literal>xr_glx_hybrid</literal>.
       '';
     };
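
The new backend value is selected like the existing ones, e.g. (sketch):

    services.compton = {
      enable = true;
      backend = "xr_glx_hybrid";
    };
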
 
diff --git a/nixos/modules/services/x11/desktop-managers/default.nix b/nixos/modules/services/x11/desktop-managers/default.nix
index 39b27d4ceb61..f435e85f6b83 100644
--- a/nixos/modules/services/x11/desktop-managers/default.nix
+++ b/nixos/modules/services/x11/desktop-managers/default.nix
@@ -87,11 +87,11 @@ in
 
       default = mkOption {
         type = types.str;
-        default = "none";
-        example = "plasma5";
+        default = "";
+        example = "none";
         description = "Default desktop manager loaded if none have been chosen.";
         apply = defaultDM:
-          if defaultDM == "none" && cfg.session.list != [] then
+          if defaultDM == "" && cfg.session.list != [] then
             (head cfg.session.list).name
           else if any (w: w.name == defaultDM) cfg.session.list then
             defaultDM
@@ -109,9 +109,5 @@ in
 
   };
 
-  config = {
-    services.xserver.displayManager.session = cfg.session.list;
-    environment.systemPackages =
-      mkIf cfg.session.needBGPackages [ pkgs.feh ]; # xsetroot via xserver.enable
-  };
+  config.services.xserver.displayManager.session = cfg.session.list;
 }
diff --git a/nixos/modules/services/x11/desktop-managers/enlightenment.nix b/nixos/modules/services/x11/desktop-managers/enlightenment.nix
index 8a523f0d8036..da3287aaea6e 100644
--- a/nixos/modules/services/x11/desktop-managers/enlightenment.nix
+++ b/nixos/modules/services/x11/desktop-managers/enlightenment.nix
@@ -33,7 +33,7 @@ in
       pkgs.xorg.xauth # used by kdesu
       pkgs.gtk2 # To get GTK+'s themes.
       pkgs.tango-icon-theme
-      pkgs.shared_mime_info
+      pkgs.shared-mime-info
       pkgs.gnome2.gnomeicontheme
       pkgs.xorg.xcursorthemes
     ];
@@ -47,7 +47,7 @@ in
         export GTK_DATA_PREFIX=${config.system.path}
         # find theme engines
         export GTK_PATH=${config.system.path}/lib/gtk-3.0:${config.system.path}/lib/gtk-2.0
-        export XDG_MENU_PREFIX=enlightenment
+        export XDG_MENU_PREFIX=e-
 
         export GST_PLUGIN_PATH="${GST_PLUGIN_PATH}"
 
diff --git a/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixos/modules/services/x11/desktop-managers/gnome3.nix
index e5a79496c7a5..10e8ef0ed381 100644
--- a/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -27,7 +27,7 @@ let
   nixos-gsettings-desktop-schemas = pkgs.runCommand "nixos-gsettings-desktop-schemas" {}
     ''
      mkdir -p $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
-     cp -rf ${pkgs.gnome3.gsettings_desktop_schemas}/share/gsettings-schemas/gsettings-desktop-schemas*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
+     cp -rf ${pkgs.gnome3.gsettings-desktop-schemas}/share/gsettings-schemas/gsettings-desktop-schemas*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
 
      ${concatMapStrings (pkg: "cp -rf ${pkg}/share/gsettings-schemas/*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas\n") cfg.extraGSettingsOverridePackages}
 
@@ -60,7 +60,7 @@ in {
         example = literalExample "[ pkgs.gnome3.gpaste ]";
         description = "Additional list of packages to be added to the session search path.
                        Useful for gnome shell extensions or gsettings-conditionated autostart.";
-        apply = list: list ++ [ pkgs.gnome3.gnome_shell pkgs.gnome3.gnome-shell-extensions ];
+        apply = list: list ++ [ pkgs.gnome3.gnome-shell pkgs.gnome3.gnome-shell-extensions ];
       };
 
       extraGSettingsOverrides = mkOption {
@@ -108,6 +108,7 @@ in {
     services.gnome3.seahorse.enable = mkDefault true;
     services.gnome3.sushi.enable = mkDefault true;
     services.gnome3.tracker.enable = mkDefault true;
+    services.gnome3.tracker-miners.enable = mkDefault true;
     hardware.pulseaudio.enable = mkDefault true;
     services.telepathy.enable = mkDefault true;
     networking.networkmanager.enable = mkDefault true;
@@ -117,13 +118,13 @@ in {
     services.packagekit.enable = mkDefault true;
     hardware.bluetooth.enable = mkDefault true;
     services.xserver.libinput.enable = mkDefault true; # for controlling touchpad settings via gnome control center
-    services.udev.packages = [ pkgs.gnome3.gnome_settings_daemon ];
+    services.udev.packages = [ pkgs.gnome3.gnome-settings-daemon ];
     systemd.packages = [ pkgs.gnome3.vino ];
 
     # If gnome3 is installed, build vim for gtk3 too.
     nixpkgs.config.vim.gui = "gtk3";
 
-    fonts.fonts = [ pkgs.dejavu_fonts pkgs.cantarell_fonts ];
+    fonts.fonts = [ pkgs.dejavu_fonts pkgs.cantarell-fonts ];
 
     services.xserver.desktopManager.session = singleton
       { name = "gnome3";
@@ -135,7 +136,7 @@ in {
           # find theme engines
           export GTK_PATH=${config.system.path}/lib/gtk-3.0:${config.system.path}/lib/gtk-2.0
 
-          export XDG_MENU_PREFIX=gnome
+          export XDG_MENU_PREFIX=gnome-
 
           ${concatMapStrings (p: ''
             if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
@@ -152,7 +153,7 @@ in {
           export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${mimeAppsList}/share
 
           # Override gsettings-desktop-schema
-          export XDG_DATA_DIRS=${nixos-gsettings-desktop-schemas}/share/gsettings-schemas/nixos-gsettings-overrides''${XDG_DATA_DIRS:+:}$XDG_DATA_DIRS
+          export NIX_GSETTINGS_OVERRIDES_DIR=${nixos-gsettings-desktop-schemas}/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
 
           # Let nautilus find extensions
           export NAUTILUS_EXTENSION_DIR=${config.system.path}/lib/nautilus/extensions-3.0/
@@ -163,7 +164,7 @@ in {
           # Update user dirs as described in http://freedesktop.org/wiki/Software/xdg-user-dirs/
           ${pkgs.xdg-user-dirs}/bin/xdg-user-dirs-update
 
-          ${pkgs.gnome3.gnome_session}/bin/gnome-session ${optionalString cfg.debug "--debug"} &
+          ${pkgs.gnome3.gnome-session}/bin/gnome-session ${optionalString cfg.debug "--debug"} &
           waitPID=$!
         '';
       };
@@ -171,7 +172,7 @@ in {
     services.xserver.updateDbusEnvironment = true;
 
     environment.variables.GIO_EXTRA_MODULES = [ "${lib.getLib pkgs.gnome3.dconf}/lib/gio/modules"
-                                                "${pkgs.gnome3.glib_networking.out}/lib/gio/modules"
+                                                "${pkgs.gnome3.glib-networking.out}/lib/gio/modules"
                                                 "${pkgs.gnome3.gvfs}/lib/gio/modules" ];
     environment.systemPackages = pkgs.gnome3.corePackages ++ cfg.sessionPath
       ++ (removePackagesByName pkgs.gnome3.optionalPackages config.environment.gnome3.excludePackages);
@@ -179,10 +180,9 @@ in {
     # Use the correct gnome3 packageSet
     networking.networkmanager.basePackages =
       { inherit (pkgs) networkmanager modemmanager wpa_supplicant;
-        inherit (pkgs.gnome3) networkmanager_openvpn networkmanager_vpnc
-                              networkmanager_openconnect networkmanager_fortisslvpn
-                              networkmanager_pptp networkmanager_iodine
-                              networkmanager_l2tp; };
+        inherit (pkgs.gnome3) networkmanager-openvpn networkmanager-vpnc
+                              networkmanager-openconnect networkmanager-fortisslvpn
+                              networkmanager-iodine networkmanager-l2tp; };
 
     # Needed for themes and backgrounds
     environment.pathsToLink = [ "/share" ];
diff --git a/nixos/modules/services/x11/desktop-managers/lxqt.nix b/nixos/modules/services/x11/desktop-managers/lxqt.nix
index fb907618d35b..2596ec4ad85c 100644
--- a/nixos/modules/services/x11/desktop-managers/lxqt.nix
+++ b/nixos/modules/services/x11/desktop-managers/lxqt.nix
@@ -61,6 +61,8 @@ in
 
     environment.variables.GIO_EXTRA_MODULES = [ "${pkgs.gvfs}/lib/gio/modules" ];
 
+    services.upower.enable = config.powerManagement.enable;
   };
 
+
 }
diff --git a/nixos/modules/services/x11/desktop-managers/mate.nix b/nixos/modules/services/x11/desktop-managers/mate.nix
index ab8a0a48b483..db83aaf3c19f 100644
--- a/nixos/modules/services/x11/desktop-managers/mate.nix
+++ b/nixos/modules/services/x11/desktop-managers/mate.nix
@@ -12,6 +12,17 @@ let
     in
       filter (x: !(builtins.elem (pkgName x) ysNames)) xs;
 
+  addToXDGDirs = p: ''
+    if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
+      export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${p}/share/gsettings-schemas/${p.name}
+    fi
+
+    if [ -d "${p}/lib/girepository-1.0" ]; then
+      export GI_TYPELIB_PATH=$GI_TYPELIB_PATH''${GI_TYPELIB_PATH:+:}${p}/lib/girepository-1.0
+      export LD_LIBRARY_PATH=$LD_LIBRARY_PATH''${LD_LIBRARY_PATH:+:}${p}/lib
+    fi
+  '';
+
   xcfg = config.services.xserver;
   cfg = xcfg.desktopManager.mate;
 
@@ -20,10 +31,14 @@ in
 {
   options = {
 
-    services.xserver.desktopManager.mate.enable = mkOption {
-      type = types.bool;
-      default = false;
-      description = "Enable the MATE desktop environment";
+    services.xserver.desktopManager.mate = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Enable the MATE desktop environment";
+      };
+
+      debug = mkEnableOption "mate-session debug messages";
     };
 
     environment.mate.excludePackages = mkOption {
@@ -47,15 +62,34 @@ in
         # Find theme engines
         export GTK_PATH=${config.system.path}/lib/gtk-3.0:${config.system.path}/lib/gtk-2.0
 
-        export XDG_MENU_PREFIX=mate
+        export XDG_MENU_PREFIX=mate-
 
         # Find the mouse
         export XCURSOR_PATH=~/.icons:${config.system.path}/share/icons
 
+        # Let caja find extensions
+        export CAJA_EXTENSION_DIRS=$CAJA_EXTENSION_DIRS''${CAJA_EXTENSION_DIRS:+:}${config.system.path}/lib/caja/extensions-2.0
+
+        # Let caja extensions find gsettings schemas
+        ${concatMapStrings (p: ''
+          if [ -d "${p}/lib/caja/extensions-2.0" ]; then
+            ${addToXDGDirs p}
+          fi
+          '')
+          config.environment.systemPackages
+        }
+
+        # Let mate-panel find applets
+        export MATE_PANEL_APPLETS_DIR=$MATE_PANEL_APPLETS_DIR''${MATE_PANEL_APPLETS_DIR:+:}${config.system.path}/share/mate-panel/applets
+        export MATE_PANEL_EXTRA_MODULES=$MATE_PANEL_EXTRA_MODULES''${MATE_PANEL_EXTRA_MODULES:+:}${config.system.path}/lib/mate-panel/applets
+
+        # Add mate-control-center paths to some XDG variables because its schemas are needed by mate-settings-daemon, and mate-settings-daemon is a dependency for mate-control-center (that is, they are mutually recursive)
+        ${addToXDGDirs pkgs.mate.mate-control-center}
+
         # Update user dirs as described in http://freedesktop.org/wiki/Software/xdg-user-dirs/
         ${pkgs.xdg-user-dirs}/bin/xdg-user-dirs-update
 
-        ${pkgs.mate.mate-session-manager}/bin/mate-session &
+        ${pkgs.mate.mate-session-manager}/bin/mate-session ${optionalString cfg.debug "--debug"} &
         waitPID=$!
       '';
     };
@@ -68,12 +102,14 @@ in
 
     services.dbus.packages = [
       pkgs.gnome3.dconf
-      pkgs.at_spi2_core
+      pkgs.at-spi2-core
     ];
 
     services.gnome3.gnome-keyring.enable = true;
     services.upower.enable = config.powerManagement.enable;
 
+    security.pam.services."mate-screensaver".unixAuth = true;
+
     environment.pathsToLink = [ "/share" ];
   };
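
With the options now grouped into a submodule, enabling MATE with session debugging looks roughly like:

    services.xserver.desktopManager.mate = {
      enable = true;
      debug = true;   # passes --debug to mate-session
    };
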
 
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index bb4f4e868fea..91d091d7d7e2 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -25,8 +25,8 @@ in
         type = types.bool;
         default = true;
         description = ''
-          Enable support for Qt 4-based applications. Particularly, install the
-          Qt 4 version of the Breeze theme and a default backend for Phonon.
+          Enable support for Qt 4-based applications. Particularly, install a
+          default backend for Phonon.
         '';
       };
 
@@ -47,6 +47,18 @@ in
             ${getBin config.hardware.pulseaudio.package}/bin/pactl load-module module-device-manager "do_routing=1"
           ''}
 
+          if [ -f "$HOME/.config/kdeglobals" ]
+          then
+              # Remove extraneous font style names.
+              # See also: https://phabricator.kde.org/D9070
+              ${getBin pkgs.gnused}/bin/sed -i "$HOME/.config/kdeglobals" \
+                  -e '/^fixed=/ s/,Regular$//' \
+                  -e '/^font=/ s/,Regular$//' \
+                  -e '/^menuFont=/ s/,Regular$//' \
+                  -e '/^smallestReadableFont=/ s/,Regular$//' \
+                  -e '/^toolBarFont=/ s/,Regular$//'
+          fi
+
           exec "${getBin plasma5.plasma-workspace}/bin/startkde"
         '';
       };
@@ -54,6 +66,10 @@ in
       security.wrappers = {
         kcheckpass.source = "${lib.getBin plasma5.plasma-workspace}/lib/libexec/kcheckpass";
         "start_kdeinit".source = "${lib.getBin pkgs.kinit}/lib/libexec/kf5/start_kdeinit";
+        kwin_wayland = {
+          source = "${lib.getBin plasma5.kwin}/bin/kwin_wayland";
+          capabilities = "cap_sys_nice+ep";
+        };
       };
 
       environment.systemPackages = with pkgs; with qt5; with libsForQt5; with plasma5; with kdeApplications;
@@ -138,15 +154,17 @@ in
           print-manager
 
           breeze-icons
-          pkgs.hicolor_icon_theme
+          pkgs.hicolor-icon-theme
 
           kde-gtk-config breeze-gtk
 
+          qtvirtualkeyboard
+
           libsForQt56.phonon-backend-gstreamer
           libsForQt5.phonon-backend-gstreamer
         ]
 
-        ++ lib.optionals cfg.enableQt4Support [ breeze-qt4 pkgs.phonon-backend-gstreamer ]
+        ++ lib.optionals cfg.enableQt4Support [ pkgs.phonon-backend-gstreamer ]
 
         # Optional hardware support features
         ++ lib.optional config.hardware.bluetooth.enable bluedevil
@@ -193,16 +211,6 @@ in
         theme = mkDefault "breeze";
       };
 
-      boot.plymouth = {
-        theme = mkDefault "breeze";
-        themePackages = mkDefault [
-          (pkgs.breeze-plymouth.override {
-            nixosBranding = true;
-            nixosVersion = config.system.nixosRelease;
-          })
-        ];
-      };
-
       security.pam.services.kde = { allowNullPassword = true; };
 
       # Doing these one by one seems silly, but we currently lack a better
diff --git a/nixos/modules/services/x11/desktop-managers/xfce.nix b/nixos/modules/services/x11/desktop-managers/xfce.nix
index 9c42dc8781b9..7dcc600d2664 100644
--- a/nixos/modules/services/x11/desktop-managers/xfce.nix
+++ b/nixos/modules/services/x11/desktop-managers/xfce.nix
@@ -3,15 +3,11 @@
 with lib;
 
 let
-
-  xcfg = config.services.xserver;
-  cfg = xcfg.desktopManager.xfce;
-
+  cfg = config.services.xserver.desktopManager.xfce;
 in
 
 {
   options = {
-
     services.xserver.desktopManager.xfce = {
       enable = mkOption {
         type = types.bool;
@@ -54,81 +50,93 @@ in
         description = "Application used by XFCE to lock the screen.";
       };
     };
-
   };
 
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs.xfce // pkgs; [
+      # Get GTK+ themes and gtk-update-icon-cache
+      gtk2.out
+
+      # Supplies some abstract icons such as:
+      # utilities-terminal, accessories-text-editor
+      gnome3.defaultIconTheme
+
+      hicolor-icon-theme
+      tango-icon-theme
+      xfce4-icon-theme
+
+      desktop-file-utils
+      shared-mime-info
+
+      # Needed by Xfce's xinitrc script
+      # TODO: replace with command -v
+      which
+
+      exo
+      garcon
+      gtk-xfce-engine
+      gvfs
+      libxfce4ui
+      tumbler
+      xfconf
+
+      mousepad
+      ristretto
+      xfce4-appfinder
+      xfce4-screenshooter
+      xfce4-session
+      xfce4-settings
+      xfce4-terminal
+
+      (thunar.override { thunarPlugins = cfg.thunarPlugins; })
+      thunar-volman # TODO: drop
+    ] ++ (if config.hardware.pulseaudio.enable
+          then [ xfce4-mixer-pulse xfce4-volumed-pulse ]
+          else [ xfce4-mixer xfce4-volumed ])
+      # TODO: NetworkManager doesn't belong here
+      ++ optionals config.networking.networkmanager.enable [ networkmanagerapplet ]
+      ++ optionals config.powerManagement.enable [ xfce4-power-manager ]
+      ++ optionals cfg.enableXfwm [ xfwm4 ]
+      ++ optionals (!cfg.noDesktop) [
+        xfce4-panel
+        xfce4-notifyd
+        xfdesktop
+      ];
+
+    environment.pathsToLink = [
+      "/share/xfce4"
+      "/share/themes"
+      "/share/mime"
+      "/share/desktop-directories"
+      "/share/gtksourceview-2.0"
+    ];
+
+    environment.variables = {
+      GDK_PIXBUF_MODULE_FILE = "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache";
+      GIO_EXTRA_MODULES = [ "${pkgs.xfce.gvfs}/lib/gio/modules" ];
+    };
 
-  config = mkIf (xcfg.enable && cfg.enable) {
-
-    services.xserver.desktopManager.session = singleton
-      { name = "xfce";
-        bgSupport = true;
-        start =
-          ''
-            ${cfg.extraSessionCommands}
+    services.xserver.desktopManager.session = [{
+      name = "xfce";
+      bgSupport = true;
+      start = ''
+        ${cfg.extraSessionCommands}
 
-            # Set GTK_PATH so that GTK+ can find the theme engines.
-            export GTK_PATH="${config.system.path}/lib/gtk-2.0:${config.system.path}/lib/gtk-3.0"
+        # Set GTK_PATH so that GTK+ can find the theme engines.
+        export GTK_PATH="${config.system.path}/lib/gtk-2.0:${config.system.path}/lib/gtk-3.0"
 
-            # Set GTK_DATA_PREFIX so that GTK+ can find the Xfce themes.
-            export GTK_DATA_PREFIX=${config.system.path}
+        # Set GTK_DATA_PREFIX so that GTK+ can find the Xfce themes.
+        export GTK_DATA_PREFIX=${config.system.path}
 
-            ${pkgs.stdenv.shell} ${pkgs.xfce.xinitrc} &
-            waitPID=$!
-          '';
-      };
+        ${pkgs.runtimeShell} ${pkgs.xfce.xinitrc} &
+        waitPID=$!
+      '';
+    }];
 
     services.xserver.updateDbusEnvironment = true;
 
-    environment.systemPackages =
-      [ pkgs.gtk2.out # To get GTK+'s themes and gtk-update-icon-cache
-        pkgs.hicolor_icon_theme
-        pkgs.tango-icon-theme
-        pkgs.shared_mime_info
-        pkgs.which # Needed by the xfce's xinitrc script.
-        pkgs."${cfg.screenLock}"
-        pkgs.xfce.exo
-        pkgs.xfce.gtk_xfce_engine
-        pkgs.xfce.mousepad
-        pkgs.xfce.ristretto
-        pkgs.xfce.terminal
-       (pkgs.xfce.thunar.override { thunarPlugins = cfg.thunarPlugins; })
-        pkgs.xfce.xfce4icontheme
-        pkgs.xfce.xfce4session
-        pkgs.xfce.xfce4settings
-        pkgs.xfce.xfce4mixer
-        pkgs.xfce.xfce4volumed
-        pkgs.xfce.xfce4-screenshooter
-        pkgs.xfce.xfconf
-        # This supplies some "abstract" icons such as
-        # "utilities-terminal" and "accessories-text-editor".
-        pkgs.gnome3.defaultIconTheme
-        pkgs.desktop_file_utils
-        pkgs.xfce.libxfce4ui
-        pkgs.xfce.garcon
-        pkgs.xfce.thunar_volman
-        pkgs.xfce.gvfs
-        pkgs.xfce.xfce4_appfinder
-        pkgs.xfce.tumbler       # found via dbus
-      ]
-      ++ optional cfg.enableXfwm pkgs.xfce.xfwm4
-      ++ optional config.powerManagement.enable pkgs.xfce.xfce4_power_manager
-      ++ optional config.networking.networkmanager.enable pkgs.networkmanagerapplet
-      ++ optionals (!cfg.noDesktop)
-         [ pkgs.xfce.xfce4panel
-           pkgs.xfce.xfdesktop
-	   pkgs.xfce.xfce4notifyd  # found via dbus
-         ];
-
-    environment.pathsToLink =
-      [ "/share/xfce4" "/share/themes" "/share/mime" "/share/desktop-directories" "/share/gtksourceview-2.0" ];
-
-    environment.variables.GIO_EXTRA_MODULES = [ "${pkgs.xfce.gvfs}/lib/gio/modules" ];
-
     # Enable helpful DBus services.
     services.udisks2.enable = true;
     services.upower.enable = config.powerManagement.enable;
-
   };
-
 }
diff --git a/nixos/modules/services/x11/display-managers/default.nix b/nixos/modules/services/x11/display-managers/default.nix
index 3fa482fb6722..43ed21c95fee 100644
--- a/nixos/modules/services/x11/display-managers/default.nix
+++ b/nixos/modules/services/x11/display-managers/default.nix
@@ -59,12 +59,6 @@ let
       # Now it should be safe to assume that the script was called with the
       # expected parameters.
 
-      ${optionalString cfg.displayManager.logToJournal ''
-        if [ -z "$_DID_SYSTEMD_CAT" ]; then
-          _DID_SYSTEMD_CAT=1 exec ${config.systemd.package}/bin/systemd-cat -t xsession -- "$0" "$@"
-        fi
-      ''}
-
       . /etc/profile
       cd "$HOME"
 
@@ -72,16 +66,23 @@ let
       sessionType="$1"
       if [ "$sessionType" = default ]; then sessionType=""; fi
 
-      ${optionalString (!cfg.displayManager.job.logsXsession && !cfg.displayManager.logToJournal) ''
-        exec > ~/.xsession-errors 2>&1
-      ''}
-
       ${optionalString cfg.startDbusSession ''
         if test -z "$DBUS_SESSION_BUS_ADDRESS"; then
           exec ${pkgs.dbus.dbus-launch} --exit-with-session "$0" "$sessionType"
         fi
       ''}
 
+      ${optionalString cfg.displayManager.job.logToJournal ''
+        if [ -z "$_DID_SYSTEMD_CAT" ]; then
+          export _DID_SYSTEMD_CAT=1
+          exec ${config.systemd.package}/bin/systemd-cat -t xsession "$0" "$sessionType"
+        fi
+      ''}
+
+      ${optionalString cfg.displayManager.job.logToFile ''
+        exec &> >(tee ~/.xsession-errors)
+      ''}
+
       # Start PulseAudio if enabled.
       ${optionalString (config.hardware.pulseaudio.enable) ''
         ${optionalString (!config.hardware.pulseaudio.systemWide)
@@ -306,26 +307,24 @@ in
           description = "Additional environment variables needed by the display manager.";
         };
 
-        logsXsession = mkOption {
+        logToFile = mkOption {
           type = types.bool;
           default = false;
           description = ''
-            Whether the display manager redirects the
-            output of the session script to
-            <filename>~/.xsession-errors</filename>.
+            Whether the display manager redirects the output of the
+            session script to <filename>~/.xsession-errors</filename>.
           '';
         };
 
-      };
+        logToJournal = mkOption {
+          type = types.bool;
+          default = true;
+          description = ''
+            Whether the display manager redirects the output of the
+            session script to the systemd journal.
+          '';
+        };
 
-      logToJournal = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          By default, the stdout/stderr of sessions is written
-          to <filename>~/.xsession-errors</filename>. When this option
-          is enabled, it will instead be written to the journal.
-        '';
       };
 
     };
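
These options are normally set by the display-manager modules themselves; a sketch of setting them explicitly, only to illustrate the renamed knobs:

    services.xserver.displayManager.job = {
      logToJournal = true;   # default: session output goes to the systemd journal
      logToFile = true;      # additionally tee output to ~/.xsession-errors
    };
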
diff --git a/nixos/modules/services/x11/display-managers/gdm.nix b/nixos/modules/services/x11/display-managers/gdm.nix
index e83f26516f5f..70fc7388c2ac 100644
--- a/nixos/modules/services/x11/display-managers/gdm.nix
+++ b/nixos/modules/services/x11/display-managers/gdm.nix
@@ -122,11 +122,8 @@ in
       "rc-local.service"
       "systemd-machined.service"
       "systemd-user-sessions.service"
-      "getty@tty1.service"
     ];
 
-    systemd.services."getty@tty1".enable = false;
-    systemd.services.display-manager.conflicts = [ "getty@tty1.service" ];
     systemd.services.display-manager.serviceConfig = {
       # Restart = "always"; - already defined in xserver.nix
       KillMode = "mixed";
@@ -136,7 +133,7 @@ in
       StandardError = "inherit";
     };
 
-    systemd.services.display-manager.path = [ pkgs.gnome3.gnome_session ];
+    systemd.services.display-manager.path = [ pkgs.gnome3.gnome-session ];
 
     services.dbus.packages = [ gdm ];
 
@@ -196,7 +193,7 @@ in
         auth     required       pam_env.so envfile=${config.system.build.pamEnvironment}
 
         auth     required       pam_succeed_if.so uid >= 1000 quiet
-        auth     optional       ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
+        auth     optional       ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so
         auth     ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
         ${optionalString config.security.pam.enableEcryptfs
           "auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
@@ -216,7 +213,7 @@ in
           "session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
         session  required       pam_loginuid.so
         session  optional       ${pkgs.systemd}/lib/security/pam_systemd.so
-        session  optional       ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
+        session  optional       ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so auto_start
       '';
 
       gdm-password.text = ''
@@ -224,7 +221,7 @@ in
         auth     required       pam_env.so envfile=${config.system.build.pamEnvironment}
 
         auth     required       pam_succeed_if.so uid >= 1000 quiet
-        auth     optional       ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
+        auth     optional       ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so
         auth     ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
         ${optionalString config.security.pam.enableEcryptfs
           "auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
@@ -243,7 +240,7 @@ in
           "session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
         session  required       pam_loginuid.so
         session  optional       ${pkgs.systemd}/lib/security/pam_systemd.so
-        session  optional       ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
+        session  optional       ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so auto_start
       '';
 
       gdm-autologin.text = ''
diff --git a/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix b/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix
index 1d5dcb2c7cbe..2a71d2338607 100644
--- a/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix
@@ -45,6 +45,8 @@ let
     theme-name = ${cfg.theme.name}
     icon-theme-name = ${cfg.iconTheme.name}
     background = ${ldmcfg.background}
+    ${optionalString (cfg.clock-format != null) "clock-format = ${cfg.clock-format}"}
+    ${optionalString (cfg.indicators != null) "indicators = ${concatStringsSep ";" cfg.indicators}"}
     ${cfg.extraConfig}
     '';
 
@@ -66,8 +68,8 @@ in
 
         package = mkOption {
           type = types.package;
-          default = pkgs.gnome3.gnome_themes_standard;
-          defaultText = "pkgs.gnome3.gnome_themes_standard";
+          default = pkgs.gnome3.gnome-themes-standard;
+          defaultText = "pkgs.gnome3.gnome-themes-standard";
           description = ''
             The package path that contains the theme given in the name option.
           '';
@@ -104,6 +106,35 @@ in
 
       };
 
+      clock-format = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "%F";
+        description = ''
+          Clock format string (as expected by strftime, e.g. "%H:%M")
+          to use with the lightdm gtk greeter panel.
+
+          If set to null the default clock format is used.
+        '';
+      };
+
+      indicators = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        example = [ "~host" "~spacer" "~clock" "~spacer" "~session" "~language" "~a11y" "~power" ];
+        description = ''
+          List of allowed indicator modules to use for the lightdm gtk
+          greeter panel.
+
+          Built-in indicators include "~a11y", "~language", "~session",
+          "~power", "~clock", "~host", "~spacer". Unity indicators can be
+          represented by short name (e.g. "sound", "power"), service file name,
+          or absolute path.
+
+          If set to null the default indicators are used.
+        '';
+      };
+
       extraConfig = mkOption {
         type = types.lines;
         default = "";
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index 1733f2fd39b2..9d30155a7234 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -9,6 +9,10 @@ let
   xEnv = config.systemd.services."display-manager".environment;
   cfg = dmcfg.lightdm;
 
+  dmDefault = xcfg.desktopManager.default;
+  wmDefault = xcfg.windowManager.default;
+  hasDefaultUserSession = dmDefault != "none" || wmDefault != "none";
+
   inherit (pkgs) stdenv lightdm writeScript writeText;
 
   # lightdm runs with clearenv(), but we need a few things in the enviornment for X to startup
@@ -54,14 +58,13 @@ let
         autologin-user-timeout = ${toString cfg.autoLogin.timeout}
         autologin-session = ${defaultSessionName}
       ''}
+      ${optionalString hasDefaultUserSession ''
+        user-session=${defaultSessionName}
+      ''}
       ${cfg.extraSeatDefaults}
     '';
 
-  defaultSessionName =
-    let
-      dm = xcfg.desktopManager.default;
-      wm = xcfg.windowManager.default;
-    in dm + optionalString (wm != "none") ("+" + wm);
+  defaultSessionName = dmDefault + optionalString (wmDefault != "none") ("+" + wmDefault);
 in
 {
   # Note: the order in which lightdm greeter modules are imported
@@ -179,6 +182,14 @@ in
           default session: ${defaultSessionName} is not valid.
         '';
       }
+      { assertion = hasDefaultUserSession -> elem defaultSessionName dmcfg.session.names;
+        message = ''
+          services.xserver.desktopManager.default and
+          services.xserver.windowManager.default are not set to valid
+          values. The current default session: ${defaultSessionName}
+          is not valid.
+        '';
+      }
       { assertion = !cfg.greeter.enable -> (cfg.autoLogin.enable && cfg.autoLogin.timeout == 0);
         message = ''
           LightDM can only run without greeter if automatic login is enabled and the timeout for it
@@ -190,7 +201,7 @@ in
     services.xserver.displayManager.slim.enable = false;
 
     services.xserver.displayManager.job = {
-      logsXsession = true;
+      logToFile = true;
 
       # lightdm relaunches itself via just `lightdm`, so needs to be on the PATH
       execCmd = ''
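
The new user-session handling keys off the configured defaults; a sketch, assuming an XFCE session is enabled so the assertion passes:

    services.xserver = {
      displayManager.lightdm.enable = true;
      desktopManager.default = "xfce";
      windowManager.default = "none";
    };
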
diff --git a/nixos/modules/services/x11/display-managers/sddm.nix b/nixos/modules/services/x11/display-managers/sddm.nix
index facaea131ae5..2d4cb8aa20a5 100644
--- a/nixos/modules/services/x11/display-managers/sddm.nix
+++ b/nixos/modules/services/x11/display-managers/sddm.nix
@@ -205,7 +205,7 @@ in
     services.xserver.displayManager.slim.enable = false;
 
     services.xserver.displayManager.job = {
-      logsXsession = true;
+      logToFile = true;
 
       environment = {
         # Load themes from system environment
diff --git a/nixos/modules/services/x11/display-managers/slim.nix b/nixos/modules/services/x11/display-managers/slim.nix
index 0c4dd1973b53..f645a5c2f078 100644
--- a/nixos/modules/services/x11/display-managers/slim.nix
+++ b/nixos/modules/services/x11/display-managers/slim.nix
@@ -14,7 +14,7 @@ let
       default_xserver ${dmcfg.xserverBin}
       xserver_arguments ${toString dmcfg.xserverArgs}
       sessiondir ${dmcfg.session.desktops}
-      login_cmd exec ${pkgs.stdenv.shell} ${dmcfg.session.script} "%session"
+      login_cmd exec ${pkgs.runtimeShell} ${dmcfg.session.script} "%session"
       halt_cmd ${config.systemd.package}/sbin/shutdown -h now
       reboot_cmd ${config.systemd.package}/sbin/shutdown -r now
       logfile /dev/stderr
diff --git a/nixos/modules/services/x11/display-managers/xpra.nix b/nixos/modules/services/x11/display-managers/xpra.nix
index 8f5ce3dccc6a..b46ede550c16 100644
--- a/nixos/modules/services/x11/display-managers/xpra.nix
+++ b/nixos/modules/services/x11/display-managers/xpra.nix
@@ -220,7 +220,7 @@ in
     '';
 
     services.xserver.displayManager.job = {
-      logsXsession = true;
+      logToFile = true;
 
       execCmd = ''
         ${optionalString (cfg.pulseaudio)
diff --git a/nixos/modules/services/x11/hardware/libinput.nix b/nixos/modules/services/x11/hardware/libinput.nix
index 44555cb6e2a9..d0a87f183b6f 100644
--- a/nixos/modules/services/x11/hardware/libinput.nix
+++ b/nixos/modules/services/x11/hardware/libinput.nix
@@ -170,7 +170,7 @@ in {
 
       disableWhileTyping = mkOption {
         type = types.bool;
-        default = true;
+        default = false;
         description =
           ''
             Disable the touchpad while typing.
@@ -198,6 +198,13 @@ in {
 
     environment.systemPackages = [ pkgs.xorg.xf86inputlibinput ];
 
+    environment.etc = [
+      (let cfgPath = "X11/xorg.conf.d/40-libinput.conf"; in {
+        source = pkgs.xorg.xf86inputlibinput.out + "/share/" + cfgPath;
+        target = cfgPath;
+      })
+    ];
+
     services.udev.packages = [ pkgs.libinput ];
 
     services.xserver.config =
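
Because disableWhileTyping now defaults to false, anyone who relied on the old behaviour has to opt back in explicitly. A minimal sketch using the option path defined by this module:

  {
    services.xserver.libinput.enable = true;
    # restore the previous default: pause the touchpad while keys are pressed
    services.xserver.libinput.disableWhileTyping = true;
  }
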
diff --git a/nixos/modules/services/x11/window-managers/2bwm.nix b/nixos/modules/services/x11/window-managers/2bwm.nix
index e3f5ec7dbe67..fdbdf35b0f5a 100644
--- a/nixos/modules/services/x11/window-managers/2bwm.nix
+++ b/nixos/modules/services/x11/window-managers/2bwm.nix
@@ -25,12 +25,12 @@ in
       { name = "2bwm";
         start =
           ''
-            ${pkgs."2bwm"}/bin/2bwm &
+            ${pkgs._2bwm}/bin/2bwm &
             waitPID=$!
           '';
       };
 
-    environment.systemPackages = [ pkgs."2bwm" ];
+    environment.systemPackages = [ pkgs._2bwm ];
 
   };
 
diff --git a/nixos/modules/services/x11/window-managers/awesome.nix b/nixos/modules/services/x11/window-managers/awesome.nix
index eb97449c6bd9..71eb02ec5954 100644
--- a/nixos/modules/services/x11/window-managers/awesome.nix
+++ b/nixos/modules/services/x11/window-managers/awesome.nix
@@ -6,7 +6,11 @@ let
 
   cfg = config.services.xserver.windowManager.awesome;
   awesome = cfg.package;
-  inherit (pkgs.luaPackages) getLuaPath getLuaCPath;
+  getLuaPath = lib : dir : "${lib}/${dir}/lua/${pkgs.luaPackages.lua.luaversion}";
+  makeSearchPath = lib.concatMapStrings (path:
+    " --search " + (getLuaPath path "share") +
+    " --search " + (getLuaPath path "lib")
+  );
 in
 
 {
@@ -46,10 +50,7 @@ in
       { name = "awesome";
         start =
           ''
-            export LUA_CPATH="${lib.concatStringsSep ";" (map getLuaCPath cfg.luaModules)}"
-            export LUA_PATH="${lib.concatStringsSep ";" (map getLuaPath cfg.luaModules)}"
-
-            ${awesome}/bin/awesome &
+            ${awesome}/bin/awesome ${makeSearchPath cfg.luaModules} &
             waitPID=$!
           '';
       };
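
Extra Lua libraries are still declared through luaModules; they now end up as awesome --search arguments instead of LUA_PATH/LUA_CPATH exports. A minimal sketch, assuming pkgs.luaPackages.vicious is the library you want on the search path:

  { pkgs, ... }:
  {
    services.xserver.windowManager.awesome.enable = true;
    # each entry becomes `--search <pkg>/share/lua/<ver> --search <pkg>/lib/lua/<ver>`
    services.xserver.windowManager.awesome.luaModules = [ pkgs.luaPackages.vicious ];
  }
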
diff --git a/nixos/modules/services/x11/window-managers/default.nix b/nixos/modules/services/x11/window-managers/default.nix
index 25ba95fccd75..e617e55a7a57 100644
--- a/nixos/modules/services/x11/window-managers/default.nix
+++ b/nixos/modules/services/x11/window-managers/default.nix
@@ -12,6 +12,7 @@ in
     ./afterstep.nix
     ./bspwm.nix
     ./dwm.nix
+    ./evilwm.nix
     ./exwm.nix
     ./fluxbox.nix
     ./fvwm.nix
@@ -61,9 +62,7 @@ in
         example = "wmii";
         description = "Default window manager loaded if none have been chosen.";
         apply = defaultWM:
-          if defaultWM == "none" && cfg.session != []  then
-            (head cfg.session).name
-          else if any (w: w.name == defaultWM) cfg.session then
+          if any (w: w.name == defaultWM) cfg.session then
             defaultWM
           else
             throw "Default window manager (${defaultWM}) not found.";
diff --git a/nixos/modules/services/x11/window-managers/evilwm.nix b/nixos/modules/services/x11/window-managers/evilwm.nix
new file mode 100644
index 000000000000..6e19e3572c79
--- /dev/null
+++ b/nixos/modules/services/x11/window-managers/evilwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.evilwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.evilwm.enable = mkEnableOption "evilwm";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "evilwm";
+      start = ''
+        ${pkgs.evilwm}/bin/evilwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.evilwm ];
+  };
+}
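
With the module above and the import added to window-managers/default.nix, enabling evilwm is a one-liner. A minimal sketch:

  {
    services.xserver.enable = true;
    services.xserver.windowManager.evilwm.enable = true;
  }
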
diff --git a/nixos/modules/services/x11/xautolock.nix b/nixos/modules/services/x11/xautolock.nix
index 28fc92024bcb..a614559970e9 100644
--- a/nixos/modules/services/x11/xautolock.nix
+++ b/nixos/modules/services/x11/xautolock.nix
@@ -26,9 +26,9 @@ in
         };
 
         locker = mkOption {
-          default = "xlock"; # default according to `man xautolock`
-          example = "i3lock -i /path/to/img";
-          type = types.string;
+          default = "${pkgs.xlockmore}/bin/xlock"; # default according to `man xautolock`
+          example = "${pkgs.i3lock}/bin/i3lock -i /path/to/img";
+          type = types.str;
 
           description = ''
             The script to use when automatically locking the computer.
@@ -37,8 +37,8 @@ in
 
         nowlocker = mkOption {
           default = null;
-          example = "i3lock -i /path/to/img";
-          type = types.nullOr types.string;
+          example = "${pkgs.i3lock}/bin/i3lock -i /path/to/img";
+          type = types.nullOr types.str;
 
           description = ''
             The script to use when manually locking the computer with <command>xautolock -locknow</command>.
@@ -56,10 +56,8 @@ in
 
         notifier = mkOption {
           default = null;
-          example = literalExample ''
-            "${pkgs.libnotify}/bin/notify-send \"Locking in 10 seconds\""
-          '';
-          type = types.nullOr types.string;
+          example = "${pkgs.libnotify}/bin/notify-send \"Locking in 10 seconds\"";
+          type = types.nullOr types.str;
 
           description = ''
             Notification script to be used to warn about the pending autolock.
@@ -68,8 +66,8 @@ in
 
         killer = mkOption {
           default = null; # default according to `man xautolock` is none
-          example = "systemctl suspend";
-          type = types.nullOr types.string;
+          example = "${pkgs.systemd}/bin/systemctl suspend";
+          type = types.nullOr types.str;
 
           description = ''
             The script to use when nothing has happened for as long as <option>killtime</option>
@@ -131,6 +129,12 @@ in
           assertion = cfg.killer != null -> cfg.killtime >= 10;
           message = "killtime has to be at least 10 minutes according to `man xautolock`";
         }
-      ];
+      ] ++ (lib.flip map [ "locker" "notifier" "nowlocker" "killer" ]
+        (option:
+        {
+          assertion = cfg."${option}" != null -> builtins.substring 0 1 cfg."${option}" == "/";
+          message = "Please specify a canonical path for `services.xserver.xautolock.${option}`";
+        })
+      );
     };
   }
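
The new assertions require every configured command to be given as an absolute path, which interpolated store paths satisfy automatically. A minimal sketch (i3lock is only an illustrative choice of locker):

  { pkgs, ... }:
  {
    services.xserver.xautolock = {
      enable = true;
      # the value starts with a /nix/store/... path, so the new
      # absolute-path assertion above holds
      locker = "${pkgs.i3lock}/bin/i3lock -i /path/to/img";
    };
  }
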
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 7d544e153e9a..5f0a0f278452 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -161,15 +161,6 @@ in
         '';
       };
 
-      plainX = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Whether the X11 session can be plain (without DM/WM) and
-          the Xsession script will be used as fallback or not.
-        '';
-      };
-
       autorun = mkOption {
         type = types.bool;
         default = true;
@@ -548,7 +539,7 @@ in
           knownVideoDrivers;
       in optional (driver != null) ({ inherit name; modules = []; driverName = name; } // driver));
 
-    nixpkgs.config.xorg = optionalAttrs (elem "vboxvideo" cfg.videoDrivers) { abiCompat = "1.18"; };
+    nixpkgs.config = optionalAttrs (elem "vboxvideo" cfg.videoDrivers) { xorg.abiCompat = "1.18"; };
 
     assertions = [
       { assertion = config.security.polkit.enable;
@@ -561,11 +552,6 @@ in
                 + "${toString (length primaryHeads)} heads set to primary: "
                 + concatMapStringsSep ", " (x: x.output) primaryHeads;
       })
-      { assertion = cfg.desktopManager.default == "none" && cfg.windowManager.default == "none" -> cfg.plainX;
-        message = "Either the desktop manager or the window manager shouldn't be `none`! "
-                + "To explicitly allow this, you can also set `services.xserver.plainX` to `true`. "
-                + "The `default` value looks for enabled WMs/DMs and select the first one.";
-      }
     ];
 
     environment.etc =
@@ -578,6 +564,22 @@ in
             target = "X11/xkb";
           }
         ])
+      # localectl looks into 00-keyboard.conf
+      ++ [
+        {
+          text = ''
+            Section "InputClass"
+              Identifier "Keyboard catchall"
+              MatchIsKeyboard "on"
+              Option "XkbModel" "${cfg.xkbModel}"
+              Option "XkbLayout" "${cfg.layout}"
+              Option "XkbOptions" "${cfg.xkbOptions}"
+              Option "XkbVariant" "${cfg.xkbVariant}"
+            EndSection
+          '';
+          target = "X11/xorg.conf.d/00-keyboard.conf";
+        }
+      ]
       # Needed since 1.18; see https://bugs.freedesktop.org/show_bug.cgi?id=89023#c5
       ++ (let cfgPath = "/X11/xorg.conf.d/10-evdev.conf"; in
         [{
@@ -624,9 +626,7 @@ in
 
         environment =
           {
-            XORG_DRI_DRIVER_PATH = "/run/opengl-driver/lib/dri"; # !!! Depends on the driver selected at runtime.
-            LD_LIBRARY_PATH = concatStringsSep ":" (
-              [ "${xorg.libX11.out}/lib" "${xorg.libXext.out}/lib" "/run/opengl-driver/lib" ]
+            LD_LIBRARY_PATH = concatStringsSep ":" ([ "/run/opengl-driver/lib" ]
               ++ concatLists (catAttrs "libPath" cfg.drivers));
           } // cfg.displayManager.job.environment;
 
@@ -697,16 +697,6 @@ in
           ${cfg.monitorSection}
         EndSection
 
-        Section "InputClass"
-          Identifier "Keyboard catchall"
-          MatchIsKeyboard "on"
-          Option "XkbRules" "base"
-          Option "XkbModel" "${cfg.xkbModel}"
-          Option "XkbLayout" "${cfg.layout}"
-          Option "XkbOptions" "${cfg.xkbOptions}"
-          Option "XkbVariant" "${cfg.xkbVariant}"
-        EndSection
-
         # Additional "InputClass" sections
         ${flip concatMapStrings cfg.inputClassSections (inputClassSection: ''
         Section "InputClass"