Diffstat (limited to 'nixos/modules/services')
-rw-r--r-- nixos/modules/services/continuous-integration/buildbot/master.nix | 67
-rw-r--r-- nixos/modules/services/continuous-integration/buildbot/worker.nix | 77
-rw-r--r-- nixos/modules/services/desktops/profile-sync-daemon.nix | 136
-rw-r--r-- nixos/modules/services/hardware/triggerhappy.nix | 114
-rw-r--r-- nixos/modules/services/logging/journaldriver.nix | 2
-rw-r--r-- nixos/modules/services/mail/rmilter.nix | 2
-rw-r--r-- nixos/modules/services/mail/rspamd.nix | 2
-rw-r--r-- nixos/modules/services/misc/emby.nix | 2
-rw-r--r-- nixos/modules/services/misc/gitlab.nix | 4
-rw-r--r-- nixos/modules/services/misc/nix-daemon.nix | 4
-rw-r--r-- nixos/modules/services/misc/nix-optimise.nix | 2
-rw-r--r-- nixos/modules/services/misc/redmine.nix | 168
-rw-r--r-- nixos/modules/services/monitoring/prometheus/alertmanager.nix | 23
-rw-r--r-- nixos/modules/services/monitoring/prometheus/exporters.nix | 22
-rw-r--r-- nixos/modules/services/monitoring/prometheus/exporters/snmp.nix | 8
-rw-r--r-- nixos/modules/services/monitoring/prometheus/exporters/tor.nix | 40
-rw-r--r-- nixos/modules/services/monitoring/prometheus/exporters/varnish.nix | 1
-rw-r--r-- nixos/modules/services/networking/bitlbee.nix | 41
-rw-r--r-- nixos/modules/services/networking/charybdis.nix | 2
-rw-r--r-- nixos/modules/services/networking/miniupnpd.nix | 24
-rw-r--r-- nixos/modules/services/networking/murmur.nix | 2
-rw-r--r-- nixos/modules/services/networking/pptpd.nix | 2
-rw-r--r-- nixos/modules/services/networking/xl2tpd.nix | 2
-rw-r--r-- nixos/modules/services/networking/xrdp.nix | 2
-rw-r--r-- nixos/modules/services/printing/cupsd.nix | 2
-rw-r--r-- nixos/modules/services/security/clamav.nix | 7
-rw-r--r-- nixos/modules/services/system/saslauthd.nix | 2
-rw-r--r-- nixos/modules/services/web-apps/nextcloud.nix | 474
-rw-r--r-- nixos/modules/services/web-apps/tt-rss.nix | 6
-rw-r--r-- nixos/modules/services/web-servers/nginx/default.nix | 17
-rw-r--r-- nixos/modules/services/web-servers/nginx/location-options.nix | 10
-rw-r--r-- nixos/modules/services/x11/desktop-managers/gnome3.nix | 1
-rw-r--r-- nixos/modules/services/x11/desktop-managers/plasma5.nix | 15
-rw-r--r-- nixos/modules/services/x11/desktop-managers/xfce.nix | 3
-rw-r--r-- nixos/modules/services/x11/display-managers/default.nix | 11
-rw-r--r-- nixos/modules/services/x11/display-managers/gdm.nix | 17
-rw-r--r-- nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix | 159
-rw-r--r-- nixos/modules/services/x11/display-managers/lightdm.nix | 17
-rw-r--r-- nixos/modules/services/x11/display-managers/sddm.nix | 4
-rw-r--r-- nixos/modules/services/x11/display-managers/startx.nix | 44
-rw-r--r-- nixos/modules/services/x11/gdk-pixbuf.nix | 45
-rw-r--r-- nixos/modules/services/x11/xserver.nix | 10
42 files changed, 1345 insertions(+), 248 deletions(-)
diff --git a/nixos/modules/services/continuous-integration/buildbot/master.nix b/nixos/modules/services/continuous-integration/buildbot/master.nix
index 8d767de37f00..0f07e6133bb5 100644
--- a/nixos/modules/services/continuous-integration/buildbot/master.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/master.nix
@@ -6,8 +6,12 @@ with lib;
 
 let
   cfg = config.services.buildbot-master;
+
+  python = cfg.package.pythonModule;
+
   escapeStr = s: escape ["'"] s;
-  masterCfg = if cfg.masterCfg == null then pkgs.writeText "master.cfg" ''
+
+  defaultMasterCfg = pkgs.writeText "master.cfg" ''
     from buildbot.plugins import *
     factory = util.BuildFactory()
     c = BuildmasterConfig = dict(
@@ -27,8 +31,28 @@ let
       factory.addStep(step)
 
     ${cfg.extraConfig}
-  ''
-  else cfg.masterCfg;
+  '';
+
+  tacFile = pkgs.writeText "buildbot-master.tac" ''
+    import os
+
+    from twisted.application import service
+    from buildbot.master import BuildMaster
+
+    basedir = '${cfg.buildbotDir}'
+
+    configfile = '${cfg.masterCfg}'
+
+    # Default umask for server
+    umask = None
+
+    # note: this line is matched against to check that this is a buildmaster
+    # directory; do not edit it.
+    application = service.Application('buildmaster')
+
+    m = BuildMaster(basedir, configfile, umask)
+    m.setServiceParent(application)
+  '';
 
 in {
   options = {
@@ -66,9 +90,9 @@ in {
       };
 
       masterCfg = mkOption {
-        type = types.nullOr types.path;
+        type = types.path;
         description = "Optionally pass master.cfg path. Other options in this configuration will be ignored.";
-        default = null;
+        default = defaultMasterCfg;
         example = "/etc/nixos/buildbot/master.cfg";
       };
 
@@ -175,18 +199,25 @@ in {
 
       package = mkOption {
         type = types.package;
-        default = pkgs.buildbot-full;
-        defaultText = "pkgs.buildbot-full";
+        default = pkgs.pythonPackages.buildbot-full;
+        defaultText = "pkgs.pythonPackages.buildbot-full";
         description = "Package to use for buildbot.";
-        example = literalExample "pkgs.buildbot-full";
+        example = literalExample "pkgs.python3Packages.buildbot-full";
       };
 
       packages = mkOption {
-        default = with pkgs; [ python27Packages.twisted git ];
+        default = [ pkgs.git ];
         example = literalExample "[ pkgs.git ]";
         type = types.listOf types.package;
         description = "Packages to add to PATH for the buildbot process.";
       };
+
+      pythonPackages = mkOption {
+        default = pythonPackages: with pythonPackages; [ ];
+        defaultText = "pythonPackages: with pythonPackages; [ ]";
+        description = "Packages to add the to the PYTHONPATH of the buildbot process.";
+        example = literalExample "pythonPackages: with pythonPackages; [ requests ]";
+      };
     };
   };
 
@@ -210,14 +241,15 @@ in {
       description = "Buildbot Continuous Integration Server.";
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
-      path = cfg.packages;
+      path = cfg.packages ++ cfg.pythonPackages python.pkgs;
+      environment.PYTHONPATH = "${python.withPackages (self: cfg.pythonPackages self ++ [ cfg.package ])}/${python.sitePackages}";
 
       preStart = ''
-        env > envvars
-        mkdir -vp ${cfg.buildbotDir}
-        ln -sfv ${masterCfg} ${cfg.buildbotDir}/master.cfg
-        rm -fv $cfg.buildbotDir}/buildbot.tac
-        ${cfg.package}/bin/buildbot create-master ${cfg.buildbotDir}
+        mkdir -vp "${cfg.buildbotDir}"
+        # Link the tac file so buildbot command line tools recognize the directory
+        ln -sf "${tacFile}" "${cfg.buildbotDir}/buildbot.tac"
+        ${cfg.package}/bin/buildbot create-master --db "${cfg.dbUrl}" "${cfg.buildbotDir}"
+        rm -f buildbot.tac.new master.cfg.sample
       '';
 
       serviceConfig = {
@@ -225,12 +257,11 @@ in {
         User = cfg.user;
         Group = cfg.group;
         WorkingDirectory = cfg.home;
-        ExecStart = "${cfg.package}/bin/buildbot start --nodaemon ${cfg.buildbotDir}";
+        # NOTE: call twistd directly with stdout logging for systemd
+        ExecStart = "${python.pkgs.twisted}/bin/twistd -o --nodaemon --pidfile= --logfile - --python ${tacFile}";
       };
-
     };
   };
 
   meta.maintainers = with lib.maintainers; [ nand0p mic92 ];
-
 }
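
The master is now started through a generated buildbot-master.tac via twistd, with the Python environment derived from cfg.package.pythonModule. A minimal configuration sketch for the reworked options (values are illustrative, not part of this changeset):

  services.buildbot-master = {
    enable = true;
    # extra Python libraries to expose to master.cfg
    pythonPackages = pythonPackages: with pythonPackages; [ requests ];
    packages = [ pkgs.git ];
  };
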
diff --git a/nixos/modules/services/continuous-integration/buildbot/worker.nix b/nixos/modules/services/continuous-integration/buildbot/worker.nix
index 67c541570b97..4130ec918a70 100644
--- a/nixos/modules/services/continuous-integration/buildbot/worker.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/worker.nix
@@ -7,6 +7,40 @@ with lib;
 let
   cfg = config.services.buildbot-worker;
 
+  python = cfg.package.pythonModule;
+
+  tacFile = pkgs.writeText "aur-buildbot-worker.tac" ''
+    import os
+    from io import open
+
+    from buildbot_worker.bot import Worker
+    from twisted.application import service
+
+    basedir = '${cfg.buildbotDir}'
+
+    # note: this line is matched against to check that this is a worker
+    # directory; do not edit it.
+    application = service.Application('buildbot-worker')
+
+    master_url_split = '${cfg.masterUrl}'.split(':')
+    buildmaster_host = master_url_split[0]
+    port = int(master_url_split[1])
+    workername = '${cfg.workerUser}'
+
+    with open('${cfg.workerPassFile}', 'r', encoding='utf-8') as passwd_file:
+        passwd = passwd_file.read().strip('\r\n')
+    keepalive = 600
+    umask = None
+    maxdelay = 300
+    numcpus = None
+    allow_shutdown = None
+
+    s = Worker(buildmaster_host, port, workername, passwd, basedir,
+               keepalive, umask=umask, maxdelay=maxdelay,
+               numcpus=numcpus, allow_shutdown=allow_shutdown)
+    s.setServiceParent(application)
+  '';
+
 in {
   options = {
     services.buildbot-worker = {
@@ -59,6 +93,23 @@ in {
         description = "Specifies the Buildbot Worker password.";
       };
 
+      workerPassFile = mkOption {
+        type = types.path;
+        description = "File used to store the Buildbot Worker password";
+      };
+
+      hostMessage = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = "Description of this worker";
+      };
+
+      adminMessage = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = "Name of the administrator of this worker";
+      };
+
       masterUrl = mkOption {
         default = "localhost:9989";
         type = types.str;
@@ -67,23 +118,24 @@ in {
 
       package = mkOption {
         type = types.package;
-        default = pkgs.buildbot-worker;
-        defaultText = "pkgs.buildbot-worker";
+        default = pkgs.pythonPackages.buildbot-worker;
+        defaultText = "pkgs.pythonPackages.buildbot-worker";
         description = "Package to use for buildbot worker.";
-        example = literalExample "pkgs.buildbot-worker";
+        example = literalExample "pkgs.python3Packages.buildbot-worker";
       };
 
       packages = mkOption {
-        default = with pkgs; [ python27Packages.twisted git ];
+        default = with pkgs; [ git ];
         example = literalExample "[ pkgs.git ]";
         type = types.listOf types.package;
         description = "Packages to add to PATH for the buildbot process.";
       };
-
     };
   };
 
   config = mkIf cfg.enable {
+    services.buildbot-worker.workerPassFile = mkDefault (pkgs.writeText "buildbot-worker-password" cfg.workerPass);
+
     users.groups = optional (cfg.group == "bbworker") {
       name = "bbworker";
     };
@@ -104,11 +156,16 @@ in {
       after = [ "network.target" "buildbot-master.service" ];
       wantedBy = [ "multi-user.target" ];
       path = cfg.packages;
+      environment.PYTHONPATH = "${python.withPackages (p: [ cfg.package ])}/${python.sitePackages}";
 
       preStart = ''
-        mkdir -vp ${cfg.buildbotDir}
-        rm -fv $cfg.buildbotDir}/buildbot.tac
-        ${cfg.package}/bin/buildbot-worker create-worker ${cfg.buildbotDir} ${cfg.masterUrl} ${cfg.workerUser} ${cfg.workerPass}
+        mkdir -vp "${cfg.buildbotDir}/info"
+        ${optionalString (cfg.hostMessage != null) ''
+          ln -sf "${pkgs.writeText "buildbot-worker-host" cfg.hostMessage}" "${cfg.buildbotDir}/info/host"
+        ''}
+        ${optionalString (cfg.adminMessage != null) ''
+          ln -sf "${pkgs.writeText "buildbot-worker-admin" cfg.adminMessage}" "${cfg.buildbotDir}/info/admin"
+        ''}
       '';
 
       serviceConfig = {
@@ -116,11 +173,9 @@ in {
         User = cfg.user;
         Group = cfg.group;
         WorkingDirectory = cfg.home;
-        Environment = "PYTHONPATH=${cfg.package}/lib/python2.7/site-packages:${pkgs.python27Packages.future}/lib/python2.7/site-packages";
 
         # NOTE: call twistd directly with stdout logging for systemd
-        #ExecStart = "${cfg.package}/bin/buildbot-worker start --nodaemon ${cfg.buildbotDir}";
-        ExecStart = "${pkgs.python27Packages.twisted}/bin/twistd -n -l - -y ${cfg.buildbotDir}/buildbot.tac";
+        ExecStart = "${python.pkgs.twisted}/bin/twistd --nodaemon --pidfile= --logfile - --python ${tacFile}";
       };
 
     };
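
A sketch of a worker configuration using the new workerPassFile, hostMessage and adminMessage options (paths and strings are placeholders):

  services.buildbot-worker = {
    enable = true;
    masterUrl = "localhost:9989";
    # keeps the password out of the world-readable Nix store
    workerPassFile = "/run/keys/buildbot-worker-password";
    hostMessage = "NixOS build worker";
    adminMessage = "admin@example.org";
  };
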
diff --git a/nixos/modules/services/desktops/profile-sync-daemon.nix b/nixos/modules/services/desktops/profile-sync-daemon.nix
index e3f74df3e573..4165bb64fe46 100644
--- a/nixos/modules/services/desktops/profile-sync-daemon.nix
+++ b/nixos/modules/services/desktops/profile-sync-daemon.nix
@@ -4,22 +4,7 @@ with lib;
 
 let
   cfg = config.services.psd;
-
-  configFile = ''
-    ${optionalString (cfg.users != [ ]) ''
-      USERS="${concatStringsSep " " cfg.users}"
-    ''}
-
-    ${optionalString (cfg.browsers != [ ]) ''
-      BROWSERS="${concatStringsSep " " cfg.browsers}"
-    ''}
-
-    ${optionalString (cfg.volatile != "") "VOLATILE=${cfg.volatile}"}
-    ${optionalString (cfg.daemonFile != "") "DAEMON_FILE=${cfg.daemonFile}"}
-  '';
-
 in {
-
   options.services.psd = with types; {
     enable = mkOption {
       type = bool;
@@ -28,32 +13,6 @@ in {
         Whether to enable the Profile Sync daemon.
       '';
     };
-
-    users = mkOption {
-      type = listOf str;
-      default = [ ];
-      example = [ "demo" ];
-      description = ''
-        A list of users whose browser profiles should be sync'd to tmpfs.
-      '';
-    };
-
-    browsers = mkOption {
-      type = listOf str;
-      default = [ ];
-      example = [ "chromium" "firefox" ];
-      description = ''
-        A list of browsers to sync. Available choices are:
-
-        chromium chromium-dev conkeror.mozdev.org epiphany firefox
-        firefox-trunk google-chrome google-chrome-beta google-chrome-unstable
-        heftig-aurora icecat luakit midori opera opera-developer opera-beta
-        qupzilla palemoon rekonq seamonkey
-
-        An empty list will enable all browsers.
-      '';
-    };
-
     resyncTimer = mkOption {
       type = str;
       default = "1h";
@@ -66,80 +25,53 @@ in {
         omitted.
       '';
     };
-
-    volatile = mkOption {
-      type = str;
-      default = "/run/psd-profiles";
-      description = ''
-        The directory where browser profiles should reside(this should be
-        mounted as a tmpfs). Do not include a trailing backslash.
-      '';
-    };
-
-    daemonFile = mkOption {
-      type = str;
-      default = "/run/psd";
-      description = ''
-        Where the pid and backup configuration files will be stored.
-      '';
-    };
   };
 
   config = mkIf cfg.enable {
-    assertions = [
-      { assertion = cfg.users != [];
-        message = "services.psd.users must contain at least one user";
-      }
-    ];
-
     systemd = {
-      services = {
-        psd = {
-          description = "Profile Sync daemon";
-          wants = [ "psd-resync.service" "local-fs.target" ];
-          wantedBy = [ "multi-user.target" ];
-          preStart = "mkdir -p ${cfg.volatile}";
-
-          path = with pkgs; [ glibc rsync gawk ];
-
-          unitConfig = {
-            RequiresMountsFor = [ "/home/" ];
+      user = {
+        services = {
+          psd = {
+            enable = true;
+            description = "Profile Sync daemon";
+            wants = [ "psd-resync.service" "local-fs.target" ];
+            wantedBy = [ "default.target" ];
+            path = with pkgs; [ rsync kmod gawk nettools profile-sync-daemon ];
+            unitConfig = {
+              RequiresMountsFor = [ "/home/" ];
+            };
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = "yes";
+              ExecStart = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon sync";
+              ExecStop = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon unsync";
+            };
           };
 
-          serviceConfig = {
-            Type = "oneshot";
-            RemainAfterExit = "yes";
-            ExecStart = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon sync";
-            ExecStop = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon unsync";
+          psd-resync = {
+            enable = true;
+            description = "Timed profile resync";
+            after = [ "psd.service" ];
+            wants = [ "psd-resync.timer" ];
+            partOf = [ "psd.service" ];
+            wantedBy = [ "default.target" ];
+            path = with pkgs; [ rsync kmod gawk nettools profile-sync-daemon ];
+            serviceConfig = {
+              Type = "oneshot";
+              ExecStart = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon resync";
+            };
           };
         };
 
-        psd-resync = {
-          description = "Timed profile resync";
-          after = [ "psd.service" ];
-          wants = [ "psd-resync.timer" ];
-          partOf = [ "psd.service" ];
-
-          path = with pkgs; [ glibc rsync gawk ];
+        timers.psd-resync = {
+          description = "Timer for profile sync daemon - ${cfg.resyncTimer}";
+          partOf = [ "psd-resync.service" "psd.service" ];
 
-          serviceConfig = {
-            Type = "oneshot";
-            ExecStart = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon resync";
+          timerConfig = {
+            OnUnitActiveSec = "${cfg.resyncTimer}";
           };
         };
       };
-
-      timers.psd-resync = {
-        description = "Timer for profile sync daemon - ${cfg.resyncTimer}";
-        partOf = [ "psd-resync.service" "psd.service" ];
-
-        timerConfig = {
-          OnUnitActiveSec = "${cfg.resyncTimer}";
-        };
-      };
     };
-
-    environment.etc."psd.conf".text = configFile;
-
   };
 }
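
After this change the module exposes only enable and resyncTimer and runs psd as per-user systemd units; browser and profile selection is left to profile-sync-daemon's own per-user configuration. A minimal sketch:

  services.psd = {
    enable = true;
    resyncTimer = "30min";
  };
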
diff --git a/nixos/modules/services/hardware/triggerhappy.nix b/nixos/modules/services/hardware/triggerhappy.nix
new file mode 100644
index 000000000000..81d4a1ae65bf
--- /dev/null
+++ b/nixos/modules/services/hardware/triggerhappy.nix
@@ -0,0 +1,114 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.triggerhappy;
+
+  socket = "/run/thd.socket";
+
+  configFile = pkgs.writeText "triggerhappy.conf" ''
+    ${concatMapStringsSep "\n"
+      ({ keys, event, cmd, ... }:
+        ''${concatMapStringsSep "+" (x: "KEY_" + x) keys} ${toString { press = 1; hold = 2; release = 0; }.${event}} ${cmd}''
+      )
+      cfg.bindings}
+    ${cfg.extraConfig}
+  '';
+
+  bindingCfg = { config, ... }: {
+    options = {
+
+      keys = mkOption {
+        type = types.listOf types.str;
+        description = "List of keys to match.  Key names as defined in linux/input-event-codes.h";
+      };
+
+      event = mkOption {
+        type = types.enum ["press" "hold" "release"];
+        default = "press";
+        description = "Event to match.";
+      };
+
+      cmd = mkOption {
+        type = types.str;
+        description = "What to run.";
+      };
+
+    };
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.triggerhappy = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable the <command>triggerhappy</command> hotkey daemon.
+        '';
+      };
+
+      bindings = mkOption {
+        type = types.listOf (types.submodule bindingCfg);
+        default = [];
+        example = lib.literalExample ''
+          [ { keys = ["PLAYPAUSE"];  cmd = "''${pkgs.mpc_cli}/bin/mpc -q toggle"; } ]
+        '';
+        description = ''
+          Key bindings for <command>triggerhappy</command>.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Literal contents to append to the end of the <command>triggerhappy</command> configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.sockets.triggerhappy = {
+      description = "Triggerhappy Socket";
+      wantedBy = [ "sockets.target" ];
+      socketConfig.ListenDatagram = socket;
+    };
+
+    systemd.services.triggerhappy = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "local-fs.target" ];
+      description = "Global hotkey daemon";
+      serviceConfig = {
+        ExecStart = "${pkgs.triggerhappy}/bin/thd --user nobody --socket ${socket} --triggers ${configFile} --deviceglob /dev/input/event*";
+      };
+    };
+
+    services.udev.packages = lib.singleton (pkgs.writeTextFile {
+      name = "triggerhappy-udev-rules";
+      destination = "/etc/udev/rules.d/61-triggerhappy.rules";
+      text = ''
+        ACTION=="add", SUBSYSTEM=="input", KERNEL=="event[0-9]*", ATTRS{name}!="triggerhappy", \
+          RUN+="${pkgs.triggerhappy}/bin/th-cmd --socket ${socket} --passfd --udev"
+      '';
+    });
+
+  };
+
+}
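
A usage sketch for the new triggerhappy module, combining a structured binding with extraConfig (key names and commands are illustrative):

  services.triggerhappy = {
    enable = true;
    bindings = [
      { keys = [ "PLAYPAUSE" ]; event = "press"; cmd = "${pkgs.mpc_cli}/bin/mpc -q toggle"; }
    ];
    extraConfig = ''
      KEY_VOLUMEUP 1 ${pkgs.alsaUtils}/bin/amixer -q set Master 5%+
    '';
  };
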
diff --git a/nixos/modules/services/logging/journaldriver.nix b/nixos/modules/services/logging/journaldriver.nix
index 74ac3d4c2365..9bd581e9ec0e 100644
--- a/nixos/modules/services/logging/journaldriver.nix
+++ b/nixos/modules/services/logging/journaldriver.nix
@@ -7,7 +7,7 @@
 # to be set.
 #
 # For further information please consult the documentation in the
-# upstream repository at: https://github.com/aprilabank/journaldriver/
+# upstream repository at: https://github.com/tazjin/journaldriver/
 
 { config, lib, pkgs, ...}:
 
diff --git a/nixos/modules/services/mail/rmilter.nix b/nixos/modules/services/mail/rmilter.nix
index 0d91b247cd34..492c64583219 100644
--- a/nixos/modules/services/mail/rmilter.nix
+++ b/nixos/modules/services/mail/rmilter.nix
@@ -52,7 +52,7 @@ in
 
       enable = mkOption {
         type = types.bool;
-        default = cfg.rspamd.enable;
+        default = false;
         description = "Whether to run the rmilter daemon.";
       };
 
diff --git a/nixos/modules/services/mail/rspamd.nix b/nixos/modules/services/mail/rspamd.nix
index bba11796a3d3..ff01a5dee53d 100644
--- a/nixos/modules/services/mail/rspamd.nix
+++ b/nixos/modules/services/mail/rspamd.nix
@@ -159,7 +159,7 @@ in
 
     services.rspamd = {
 
-      enable = mkEnableOption "Whether to run the rspamd daemon.";
+      enable = mkEnableOption "rspamd, the Rapid spam filtering system";
 
       debug = mkOption {
         type = types.bool;
diff --git a/nixos/modules/services/misc/emby.nix b/nixos/modules/services/misc/emby.nix
index ff68b850cd91..0ad4a3f7376f 100644
--- a/nixos/modules/services/misc/emby.nix
+++ b/nixos/modules/services/misc/emby.nix
@@ -55,7 +55,7 @@ in
         User = cfg.user;
         Group = cfg.group;
         PermissionsStartOnly = "true";
-        ExecStart = "${pkgs.emby}/bin/MediaBrowser.Server.Mono";
+        ExecStart = "${pkgs.emby}/bin/emby -programdata ${cfg.dataDir}";
         Restart = "on-failure";
       };
     };
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index d81aa5643e53..8ea831afb7c1 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -53,6 +53,7 @@ let
     repos_path: "${cfg.statePath}/repositories"
     secret_file: "${cfg.statePath}/config/gitlab_shell_secret"
     log_file: "${cfg.statePath}/log/gitlab-shell.log"
+    custom_hooks_dir: "${cfg.statePath}/custom_hooks"
     redis:
       bin: ${pkgs.redis}/bin/redis-cli
       host: 127.0.0.1
@@ -562,6 +563,9 @@ in {
         mkdir -p ${cfg.statePath}/shell
         mkdir -p ${cfg.statePath}/db
         mkdir -p ${cfg.statePath}/uploads
+        mkdir -p ${cfg.statePath}/custom_hooks/pre-receive.d
+        mkdir -p ${cfg.statePath}/custom_hooks/post-receive.d
+        mkdir -p ${cfg.statePath}/custom_hooks/update.d
 
         rm -rf ${cfg.statePath}/config ${cfg.statePath}/shell/hooks
         mkdir -p ${cfg.statePath}/config
diff --git a/nixos/modules/services/misc/nix-daemon.nix b/nixos/modules/services/misc/nix-daemon.nix
index 24379ec27354..5e171c08d893 100644
--- a/nixos/modules/services/misc/nix-daemon.nix
+++ b/nixos/modules/services/misc/nix-daemon.nix
@@ -399,8 +399,8 @@ in
     systemd.sockets.nix-daemon.wantedBy = [ "sockets.target" ];
 
     systemd.services.nix-daemon =
-      { path = [ nix pkgs.utillinux ]
-          ++ optionals cfg.distributedBuilds [ config.programs.ssh.package pkgs.gzip ]
+      { path = [ nix pkgs.utillinux config.programs.ssh.package ]
+          ++ optionals cfg.distributedBuilds [ pkgs.gzip ]
           ++ optionals (!isNix20) [ pkgs.openssl.bin ];
 
         environment = cfg.envVars
diff --git a/nixos/modules/services/misc/nix-optimise.nix b/nixos/modules/services/misc/nix-optimise.nix
index 6f75e4dd03ea..416529f690e0 100644
--- a/nixos/modules/services/misc/nix-optimise.nix
+++ b/nixos/modules/services/misc/nix-optimise.nix
@@ -40,6 +40,8 @@ in
 
     systemd.services.nix-optimise =
       { description = "Nix Store Optimiser";
+        # No point running it inside a nixos-container. It should be on the host instead.
+        unitConfig.ConditionVirtualization = "!container";
         serviceConfig.ExecStart = "${config.nix.package}/bin/nix-store --optimise";
         startAt = optionals cfg.automatic cfg.dates;
       };
diff --git a/nixos/modules/services/misc/redmine.nix b/nixos/modules/services/misc/redmine.nix
index f763ba21d0b2..8d25ac5cb76f 100644
--- a/nixos/modules/services/misc/redmine.nix
+++ b/nixos/modules/services/misc/redmine.nix
@@ -5,7 +5,7 @@ with lib;
 let
   cfg = config.services.redmine;
 
-  bundle = "${pkgs.redmine}/share/redmine/bin/bundle";
+  bundle = "${cfg.package}/share/redmine/bin/bundle";
 
   databaseYml = pkgs.writeText "database.yml" ''
     production:
@@ -15,6 +15,7 @@ let
       port: ${toString cfg.database.port}
       username: ${cfg.database.user}
       password: #dbpass#
+      ${optionalString (cfg.database.socket != null) "socket: ${cfg.database.socket}"}
   '';
 
   configurationYml = pkgs.writeText "configuration.yml" ''
@@ -29,6 +30,19 @@ let
     ${cfg.extraConfig}
   '';
 
+  unpackTheme = unpack "theme";
+  unpackPlugin = unpack "plugin";
+  unpack = id: (name: source:
+    pkgs.stdenv.mkDerivation {
+      name = "redmine-${id}-${name}";
+      buildInputs = [ pkgs.unzip ];
+      buildCommand = ''
+        mkdir -p $out
+        cd $out
+        unpackFile ${source}
+      '';
+  });
+
 in
 
 {
@@ -40,6 +54,14 @@ in
         description = "Enable the Redmine service.";
       };
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.redmine;
+        defaultText = "pkgs.redmine";
+        description = "Which Redmine package to use.";
+        example = "pkgs.redmine.override { ruby = pkgs.ruby_2_3; }";
+      };
+
       user = mkOption {
         type = types.str;
         default = "redmine";
@@ -52,6 +74,12 @@ in
         description = "Group under which Redmine is ran.";
       };
 
+      port = mkOption {
+        type = types.int;
+        default = 3000;
+        description = "Port on which Redmine is ran.";
+      };
+
       stateDir = mkOption {
         type = types.str;
         default = "/var/lib/redmine";
@@ -66,6 +94,41 @@ in
 
           See https://guides.rubyonrails.org/action_mailer_basics.html#action-mailer-configuration
         '';
+        example = literalExample ''
+          email_delivery:
+            delivery_method: smtp
+            smtp_settings:
+              address: mail.example.com
+              port: 25
+        '';
+      };
+
+      themes = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = "Set of themes.";
+        example = literalExample ''
+          {
+            dkuk-redmine_alex_skin = builtins.fetchurl {
+              url = https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip;
+              sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
+            };
+          }
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = "Set of plugins.";
+        example = literalExample ''
+          {
+            redmine_env_auth = builtins.fetchurl {
+              url = https://github.com/Intera/redmine_env_auth/archive/0.6.zip;
+              sha256 = "0yyr1yjd8gvvh832wdc8m3xfnhhxzk2pk3gm2psg5w9jdvd6skak";
+            };
+          }
+        '';
       };
 
       database = {
@@ -78,7 +141,7 @@ in
 
         host = mkOption {
           type = types.str;
-          default = "127.0.0.1";
+          default = (if cfg.database.socket != null then "localhost" else "127.0.0.1");
           description = "Database host address.";
         };
 
@@ -119,6 +182,13 @@ in
             <option>database.user</option>.
           '';
         };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/mysqld/mysqld.sock";
+          description = "Path to the unix socket file to use for authentication.";
+        };
       };
     };
   };
@@ -126,17 +196,20 @@ in
   config = mkIf cfg.enable {
 
     assertions = [
-      { assertion = cfg.database.passwordFile != null || cfg.database.password != "";
-        message = "either services.redmine.database.passwordFile or services.redmine.database.password must be set";
+      { assertion = cfg.database.passwordFile != null || cfg.database.password != "" || cfg.database.socket != null;
+        message = "one of services.redmine.database.socket, services.redmine.database.passwordFile, or services.redmine.database.password must be set";
+      }
+      { assertion = cfg.database.socket != null -> (cfg.database.type == "mysql2");
+        message = "Socket authentication is only available for the mysql2 database type";
       }
     ];
 
-    environment.systemPackages = [ pkgs.redmine ];
+    environment.systemPackages = [ cfg.package ];
 
     systemd.services.redmine = {
       after = [ "network.target" (if cfg.database.type == "mysql2" then "mysql.service" else "postgresql.service") ];
       wantedBy = [ "multi-user.target" ];
-      environment.HOME = "${pkgs.redmine}/share/redmine";
+      environment.HOME = "${cfg.package}/share/redmine";
       environment.RAILS_ENV = "production";
       environment.RAILS_CACHE = "${cfg.stateDir}/cache";
       environment.REDMINE_LANG = "en";
@@ -151,43 +224,80 @@ in
         subversion
       ];
       preStart = ''
-        # start with a fresh config directory every time
-        rm -rf ${cfg.stateDir}/config
-        cp -r ${pkgs.redmine}/share/redmine/config.dist ${cfg.stateDir}/config
+        # ensure cache directory exists for db:migrate command
+        mkdir -p "${cfg.stateDir}/cache"
 
-        # create the basic state directory layout pkgs.redmine expects
-        mkdir -p /run/redmine
+        # create the basic directory layout the redmine package expects
+        mkdir -p /run/redmine/public
 
         for i in config files log plugins tmp; do
-          mkdir -p ${cfg.stateDir}/$i
-          ln -fs ${cfg.stateDir}/$i /run/redmine/$i
+          mkdir -p "${cfg.stateDir}/$i"
+          ln -fs "${cfg.stateDir}/$i" /run/redmine/
+        done
+
+        for i in plugin_assets themes; do
+          mkdir -p "${cfg.stateDir}/public/$i"
+          ln -fs "${cfg.stateDir}/public/$i" /run/redmine/public/
         done
 
-        # ensure cache directory exists for db:migrate command
-        mkdir -p ${cfg.stateDir}/cache
+
+        # start with a fresh config directory
+        # the config directory is copied instead of linked as some mutable data is stored in there
+        rm -rf "${cfg.stateDir}/config/"*
+        cp -r ${cfg.package}/share/redmine/config.dist/* "${cfg.stateDir}/config/"
 
         # link in the application configuration
-        ln -fs ${configurationYml} ${cfg.stateDir}/config/configuration.yml
+        ln -fs ${configurationYml} "${cfg.stateDir}/config/configuration.yml"
+
+
+        # link in all user specified themes
+        rm -rf "${cfg.stateDir}/public/themes/"*
+        for theme in ${concatStringsSep " " (mapAttrsToList unpackTheme cfg.themes)}; do
+          ln -fs $theme/* "${cfg.stateDir}/public/themes"
+        done
+
+        # link in redmine provided themes
+        ln -sf ${cfg.package}/share/redmine/public/themes.dist/* "${cfg.stateDir}/public/themes/"
+
 
-        chmod -R ug+rwX,o-rwx+x ${cfg.stateDir}/
+        # link in all user specified plugins
+        rm -rf "${cfg.stateDir}/plugins/"*
+        for plugin in ${concatStringsSep " " (mapAttrsToList unpackPlugin cfg.plugins)}; do
+          ln -fs $plugin/* "${cfg.stateDir}/plugins/''${plugin##*-redmine-plugin-}"
+        done
+
+
+        # ensure correct permissions for most files
+        chmod -R ug+rwX,o-rwx+x "${cfg.stateDir}/"
 
-        # handle database.passwordFile
+
+        # handle database.passwordFile & permissions
         DBPASS=$(head -n1 ${cfg.database.passwordFile})
-        cp -f ${databaseYml} ${cfg.stateDir}/config/database.yml
-        sed -e "s,#dbpass#,$DBPASS,g" -i ${cfg.stateDir}/config/database.yml
-        chmod 440 ${cfg.stateDir}/config/database.yml
+        cp -f ${databaseYml} "${cfg.stateDir}/config/database.yml"
+        sed -e "s,#dbpass#,$DBPASS,g" -i "${cfg.stateDir}/config/database.yml"
+        chmod 440 "${cfg.stateDir}/config/database.yml"
+
 
         # generate a secret token if required
         if ! test -e "${cfg.stateDir}/config/initializers/secret_token.rb"; then
           ${bundle} exec rake generate_secret_token
-          chmod 440 ${cfg.stateDir}/config/initializers/secret_token.rb
+          chmod 440 "${cfg.stateDir}/config/initializers/secret_token.rb"
         fi
 
+
         # ensure everything is owned by ${cfg.user}
-        chown -R ${cfg.user}:${cfg.group} ${cfg.stateDir}
+        chown -R ${cfg.user}:${cfg.group} "${cfg.stateDir}"
+
+
+        # execute redmine required commands prior to starting the application
+        # NOTE: su required in case using mysql socket authentication
+        /run/wrappers/bin/su -s ${pkgs.bash}/bin/bash -m -l redmine -c '${bundle} exec rake db:migrate'
+        /run/wrappers/bin/su -s ${pkgs.bash}/bin/bash -m -l redmine -c '${bundle} exec rake redmine:load_default_data'
+
 
-        ${bundle} exec rake db:migrate
-        ${bundle} exec rake redmine:load_default_data
+        # log files don't exist until after first command has been executed
+        # correct ownership of files generated by calling exec rake ...
+        chown -R ${cfg.user}:${cfg.group} "${cfg.stateDir}/log"
       '';
 
       serviceConfig = {
@@ -196,13 +306,13 @@ in
         User = cfg.user;
         Group = cfg.group;
         TimeoutSec = "300";
-        WorkingDirectory = "${pkgs.redmine}/share/redmine";
-        ExecStart="${bundle} exec rails server webrick -e production -P ${cfg.stateDir}/redmine.pid";
+        WorkingDirectory = "${cfg.package}/share/redmine";
+        ExecStart="${bundle} exec rails server webrick -e production -p ${toString cfg.port} -P '${cfg.stateDir}/redmine.pid'";
       };
 
     };
 
-    users.extraUsers = optionalAttrs (cfg.user == "redmine") (singleton
+    users.users = optionalAttrs (cfg.user == "redmine") (singleton
       { name = "redmine";
         group = cfg.group;
         home = cfg.stateDir;
@@ -210,7 +320,7 @@ in
         uid = config.ids.uids.redmine;
       });
 
-    users.extraGroups = optionalAttrs (cfg.group == "redmine") (singleton
+    users.groups = optionalAttrs (cfg.group == "redmine") (singleton
       { name = "redmine";
         gid = config.ids.gids.redmine;
       });
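
A configuration sketch exercising the new package, port, socket and plugin options (the plugin URL and hash are taken from the module's own example; other values are placeholders):

  services.redmine = {
    enable = true;
    package = pkgs.redmine;
    port = 3000;
    database = {
      type = "mysql2";
      # socket authentication is only supported for mysql2
      socket = "/run/mysqld/mysqld.sock";
    };
    plugins = {
      redmine_env_auth = builtins.fetchurl {
        url = https://github.com/Intera/redmine_env_auth/archive/0.6.zip;
        sha256 = "0yyr1yjd8gvvh832wdc8m3xfnhhxzk2pk3gm2psg5w9jdvd6skak";
      };
    };
  };
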
diff --git a/nixos/modules/services/monitoring/prometheus/alertmanager.nix b/nixos/modules/services/monitoring/prometheus/alertmanager.nix
index 8a47c9f1e7d8..8a44cf7fd8f6 100644
--- a/nixos/modules/services/monitoring/prometheus/alertmanager.nix
+++ b/nixos/modules/services/monitoring/prometheus/alertmanager.nix
@@ -9,6 +9,15 @@ let
     if cfg.configText != null then
       pkgs.writeText "alertmanager.yml" cfg.configText
     else mkConfigFile;
+  cmdlineArgs = cfg.extraFlags ++ [
+    "--config.file ${alertmanagerYml}"
+    "--web.listen-address ${cfg.listenAddress}:${toString cfg.port}"
+    "--log.level ${cfg.logLevel}"
+    ] ++ (optional (cfg.webExternalUrl != null)
+      "--web.external-url ${cfg.webExternalUrl}"
+    ) ++ (optional (cfg.logFormat != null)
+      "--log.format ${cfg.logFormat}"
+  );
 in {
   options = {
     services.prometheus.alertmanager = {
@@ -99,6 +108,14 @@ in {
           Open port in firewall for incoming connections.
         '';
       };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Extra commandline options when launching the Alertmanager.
+        '';
+      };
     };
   };
 
@@ -111,11 +128,7 @@ in {
       after    = [ "network.target" ];
       script = ''
         ${pkgs.prometheus-alertmanager.bin}/bin/alertmanager \
-        --config.file ${alertmanagerYml} \
-        --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
-        --log.level ${cfg.logLevel} \
-        ${optionalString (cfg.webExternalUrl != null) ''--web.external-url ${cfg.webExternalUrl} \''}
-        ${optionalString (cfg.logFormat != null) "--log.format ${cfg.logFormat}"}
+          ${concatStringsSep " \\\n  " cmdlineArgs}
       '';
 
       serviceConfig = {
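
The command line is now assembled from a list, so additional flags can be appended declaratively. A sketch using the new extraFlags option (the flag shown is illustrative and passed through verbatim):

  services.prometheus.alertmanager = {
    enable = true;
    extraFlags = [ "--data.retention=120h" ];
  };
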
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index 1d5f400250fd..950af848c0f6 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -30,6 +30,7 @@ let
     postfix   = import ./exporters/postfix.nix   { inherit config lib pkgs; };
     snmp      = import ./exporters/snmp.nix      { inherit config lib pkgs; };
     surfboard = import ./exporters/surfboard.nix { inherit config lib pkgs; };
+    tor       = import ./exporters/tor.nix       { inherit config lib pkgs; };
     unifi     = import ./exporters/unifi.nix     { inherit config lib pkgs; };
     varnish   = import ./exporters/varnish.nix   { inherit config lib pkgs; };
   };
@@ -123,15 +124,13 @@ let
       systemd.services."prometheus-${name}-exporter" = mkMerge ([{
         wantedBy = [ "multi-user.target" ];
         after = [ "network.target" ];
-        serviceConfig = {
-          Restart = mkDefault "always";
-          PrivateTmp = mkDefault true;
-          WorkingDirectory = mkDefault /tmp;
-        } // mkIf (!(serviceOpts.serviceConfig.DynamicUser or false)) {
-          User = conf.user;
-          Group = conf.group;
-        };
-      } serviceOpts ]);
+        serviceConfig.Restart = mkDefault "always";
+        serviceConfig.PrivateTmp = mkDefault true;
+        serviceConfig.WorkingDirectory = mkDefault /tmp;
+      } serviceOpts ] ++ optional (serviceOpts.serviceConfig.DynamicUser or false) {
+        serviceConfig.User = conf.user;
+        serviceConfig.Group = conf.group;
+      });
   };
 in
 {
@@ -172,5 +171,8 @@ in
     }) exporterOpts)
   );
 
-  meta.doc = ./exporters.xml;
+  meta = {
+    doc = ./exporters.xml;
+    maintainers = [ maintainers.willibutz ];
+  };
 }
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix b/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
index 404cd0a1896b..0d9194124325 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
@@ -60,10 +60,10 @@ in
       DynamicUser = true;
       ExecStart = ''
         ${pkgs.prometheus-snmp-exporter.bin}/bin/snmp_exporter \
-          -config.file ${configFile} \
-          -log.format ${cfg.logFormat} \
-          -log.level ${cfg.logLevel} \
-          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --config.file=${configFile} \
+          --log.format=${cfg.logFormat} \
+          --log.level=${cfg.logLevel} \
+          --web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
           ${concatStringsSep " \\\n  " cfg.extraFlags}
       '';
     };
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/tor.nix b/nixos/modules/services/monitoring/prometheus/exporters/tor.nix
new file mode 100644
index 000000000000..0e2a13c44ab7
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/tor.nix
@@ -0,0 +1,40 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.tor;
+in
+{
+  port = 9130;
+  extraOpts = {
+    torControlAddress = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = ''
+        Tor control IP address or hostname.
+      '';
+    };
+
+    torControlPort = mkOption {
+      type = types.int;
+      default = 9051;
+      description = ''
+        Tor control port.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-tor-exporter}/bin/prometheus-tor-exporter \
+          -b ${cfg.listenAddress} \
+          -p ${toString cfg.port} \
+          -a ${cfg.torControlAddress} \
+          -c ${toString cfg.torControlPort} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
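
The new exporter plugs into the shared Prometheus exporters framework, so the generic enable, port and listenAddress options apply alongside the Tor-specific ones. A minimal sketch:

  services.prometheus.exporters.tor = {
    enable = true;
    torControlAddress = "127.0.0.1";
    torControlPort = 9051;
  };
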
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix b/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
index 8dbf2d735ab9..aaed76175b84 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
@@ -69,6 +69,7 @@ in
     path = [ pkgs.varnish ];
     serviceConfig = {
       DynamicUser = true;
+      RestartSec = mkDefault 1;
       ExecStart = ''
         ${pkgs.prometheus-varnish-exporter}/bin/prometheus_varnish_exporter \
           --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
diff --git a/nixos/modules/services/networking/bitlbee.nix b/nixos/modules/services/networking/bitlbee.nix
index 392a8d5c2e7c..46e3b7457610 100644
--- a/nixos/modules/services/networking/bitlbee.nix
+++ b/nixos/modules/services/networking/bitlbee.nix
@@ -7,9 +7,10 @@ let
   cfg = config.services.bitlbee;
   bitlbeeUid = config.ids.uids.bitlbee;
 
-  bitlbeePkg = if cfg.libpurple_plugins == []
-  then pkgs.bitlbee
-  else pkgs.bitlbee.override { enableLibPurple = true; };
+  bitlbeePkg = pkgs.bitlbee.override {
+    enableLibPurple = cfg.libpurple_plugins != [];
+    enablePam = cfg.authBackend == "pam";
+  };
 
   bitlbeeConfig = pkgs.writeText "bitlbee.conf"
     ''
@@ -20,6 +21,7 @@ let
     DaemonInterface = ${cfg.interface}
     DaemonPort = ${toString cfg.portNumber}
     AuthMode = ${cfg.authMode}
+    AuthBackend = ${cfg.authBackend}
     Plugindir = ${pkgs.bitlbee-plugins cfg.plugins}/lib/bitlbee
     ${lib.optionalString (cfg.hostName != "") "HostName = ${cfg.hostName}"}
     ${lib.optionalString (cfg.protocols != "") "Protocols = ${cfg.protocols}"}
@@ -70,6 +72,16 @@ in
         '';
       };
 
+      authBackend = mkOption {
+        default = "storage";
+        type = types.enum [ "storage" "pam" ];
+        description = ''
+          How users are authenticated
+            storage -- save passwords internally
+            pam -- Linux PAM authentication
+        '';
+      };
+
       authMode = mkOption {
         default = "Open";
         type = types.enum [ "Open" "Closed" "Registered" ];
@@ -147,23 +159,22 @@ in
 
   ###### implementation
 
-  config = mkIf config.services.bitlbee.enable {
-
-    users.users = singleton
-      { name = "bitlbee";
+  config =  mkMerge [
+    (mkIf config.services.bitlbee.enable {
+      users.users = singleton {
+        name = "bitlbee";
         uid = bitlbeeUid;
         description = "BitlBee user";
         home = "/var/lib/bitlbee";
         createHome = true;
       };
 
-    users.groups = singleton
-      { name = "bitlbee";
+      users.groups = singleton {
+        name = "bitlbee";
         gid = config.ids.gids.bitlbee;
       };
 
-    systemd.services.bitlbee =
-      {
+      systemd.services.bitlbee = {
         environment.PURPLE_PLUGIN_PATH = purple_plugin_path;
         description = "BitlBee IRC to other chat networks gateway";
         after = [ "network.target" ];
@@ -172,8 +183,12 @@ in
         serviceConfig.ExecStart = "${bitlbeePkg}/sbin/bitlbee -F -n -c ${bitlbeeConfig}";
       };
 
-    environment.systemPackages = [ bitlbeePkg ];
+      environment.systemPackages = [ bitlbeePkg ];
 
-  };
+    })
+    (mkIf (config.services.bitlbee.authBackend == "pam") {
+      security.pam.services.bitlbee = {};
+    })
+  ];
 
 }
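
With the new authBackend option set to "pam", BitlBee is built with PAM support and a PAM service is registered. A minimal sketch:

  services.bitlbee = {
    enable = true;
    authBackend = "pam";  # authenticate against the system's PAM stack
  };
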
diff --git a/nixos/modules/services/networking/charybdis.nix b/nixos/modules/services/networking/charybdis.nix
index 6d57faa9ac2b..3d02dc8d1375 100644
--- a/nixos/modules/services/networking/charybdis.nix
+++ b/nixos/modules/services/networking/charybdis.nix
@@ -90,7 +90,7 @@ in
           BANDB_DBPATH = "${cfg.statedir}/ban.db";
         };
         serviceConfig = {
-          ExecStart   = "${charybdis}/bin/charybdis-ircd -foreground -logfile /dev/stdout -configfile ${configFile}";
+          ExecStart   = "${charybdis}/bin/charybdis -foreground -logfile /dev/stdout -configfile ${configFile}";
           Group = cfg.group;
           User = cfg.user;
           PermissionsStartOnly = true; # preStart needs to run with root permissions
diff --git a/nixos/modules/services/networking/miniupnpd.nix b/nixos/modules/services/networking/miniupnpd.nix
index 19400edb68f9..ab714a6ac75e 100644
--- a/nixos/modules/services/networking/miniupnpd.nix
+++ b/nixos/modules/services/networking/miniupnpd.nix
@@ -57,32 +57,12 @@ in
   };
 
   config = mkIf cfg.enable {
-    # from miniupnpd/netfilter/iptables_init.sh
     networking.firewall.extraCommands = ''
-      iptables -t nat -N MINIUPNPD
-      iptables -t nat -A PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
-      iptables -t mangle -N MINIUPNPD
-      iptables -t mangle -A PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
-      iptables -t filter -N MINIUPNPD
-      iptables -t filter -A FORWARD -i ${cfg.externalInterface} ! -o ${cfg.externalInterface} -j MINIUPNPD
-      iptables -t nat -N MINIUPNPD-PCP-PEER
-      iptables -t nat -A POSTROUTING -o ${cfg.externalInterface} -j MINIUPNPD-PCP-PEER
+      ${pkgs.bash}/bin/bash -x ${pkgs.miniupnpd}/etc/miniupnpd/iptables_init.sh -i ${cfg.externalInterface}
     '';
 
-    # from miniupnpd/netfilter/iptables_removeall.sh
     networking.firewall.extraStopCommands = ''
-      iptables -t nat -F MINIUPNPD
-      iptables -t nat -D PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
-      iptables -t nat -X MINIUPNPD
-      iptables -t mangle -F MINIUPNPD
-      iptables -t mangle -D PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
-      iptables -t mangle -X MINIUPNPD
-      iptables -t filter -F MINIUPNPD
-      iptables -t filter -D FORWARD -i ${cfg.externalInterface} ! -o ${cfg.externalInterface} -j MINIUPNPD
-      iptables -t filter -X MINIUPNPD
-      iptables -t nat -F MINIUPNPD-PCP-PEER
-      iptables -t nat -D POSTROUTING -o ${cfg.externalInterface} -j MINIUPNPD-PCP-PEER
-      iptables -t nat -X MINIUPNPD-PCP-PEER
+      ${pkgs.bash}/bin/bash -x ${pkgs.miniupnpd}/etc/miniupnpd/iptables_removeall.sh -i ${cfg.externalInterface}
     '';
 
     systemd.services.miniupnpd = {
diff --git a/nixos/modules/services/networking/murmur.nix b/nixos/modules/services/networking/murmur.nix
index fcc813e6898f..a6e90feff7ea 100644
--- a/nixos/modules/services/networking/murmur.nix
+++ b/nixos/modules/services/networking/murmur.nix
@@ -50,7 +50,7 @@ in
       enable = mkOption {
         type = types.bool;
         default = false;
-        description = "If enabled, start the Murmur Service.";
+        description = "If enabled, start the Murmur Mumble server.";
       };
 
       autobanAttempts = mkOption {
diff --git a/nixos/modules/services/networking/pptpd.nix b/nixos/modules/services/networking/pptpd.nix
index 56a612b91052..d8b9e8f8341a 100644
--- a/nixos/modules/services/networking/pptpd.nix
+++ b/nixos/modules/services/networking/pptpd.nix
@@ -5,7 +5,7 @@ with lib;
 {
   options = {
     services.pptpd = {
-      enable = mkEnableOption "Whether pptpd should be run on startup.";
+      enable = mkEnableOption "pptpd, the Point-to-Point Tunneling Protocol daemon";
 
       serverIp = mkOption {
         type        = types.string;
diff --git a/nixos/modules/services/networking/xl2tpd.nix b/nixos/modules/services/networking/xl2tpd.nix
index 46111a76af80..d0a3ed7bb5e0 100644
--- a/nixos/modules/services/networking/xl2tpd.nix
+++ b/nixos/modules/services/networking/xl2tpd.nix
@@ -5,7 +5,7 @@ with lib;
 {
   options = {
     services.xl2tpd = {
-      enable = mkEnableOption "Whether xl2tpd should be run on startup.";
+      enable = mkEnableOption "xl2tpd, the Layer 2 Tunnelling Protocol Daemon";
 
       serverIp = mkOption {
         type        = types.string;
diff --git a/nixos/modules/services/networking/xrdp.nix b/nixos/modules/services/networking/xrdp.nix
index 61f22a366a02..9ed3025e47d4 100644
--- a/nixos/modules/services/networking/xrdp.nix
+++ b/nixos/modules/services/networking/xrdp.nix
@@ -36,7 +36,7 @@ in
 
     services.xrdp = {
 
-      enable = mkEnableOption "Whether xrdp should be run on startup.";
+      enable = mkEnableOption "xrdp, the Remote Desktop Protocol server";
 
       package = mkOption {
         type = types.package;
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index dbf18ec1d114..1031d6f3d7e2 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -250,7 +250,7 @@ in
       drivers = mkOption {
         type = types.listOf types.path;
         default = [];
-        example = literalExample "[ pkgs.gutenprint pkgs.hplip pkgs.splix ]";
+        example = literalExample "with pkgs; [ gutenprint hplip splix cups-googlecloudprint ]";
         description = ''
           CUPS drivers to use. Drivers provided by CUPS, cups-filters,
           Ghostscript and Samba are added unconditionally. If this list contains
diff --git a/nixos/modules/services/security/clamav.nix b/nixos/modules/services/security/clamav.nix
index 9ad0095252de..04b433f8f2bf 100644
--- a/nixos/modules/services/security/clamav.nix
+++ b/nixos/modules/services/security/clamav.nix
@@ -95,7 +95,7 @@ in
     environment.etc."clamav/freshclam.conf".source = freshclamConfigFile;
     environment.etc."clamav/clamd.conf".source = clamdConfigFile;
 
-    systemd.services.clamav-daemon = optionalAttrs cfg.daemon.enable {
+    systemd.services.clamav-daemon = mkIf cfg.daemon.enable {
       description = "ClamAV daemon (clamd)";
       after = optional cfg.updater.enable "clamav-freshclam.service";
       requires = optional cfg.updater.enable "clamav-freshclam.service";
@@ -116,7 +116,7 @@ in
       };
     };
 
-    systemd.timers.clamav-freshclam = optionalAttrs cfg.updater.enable {
+    systemd.timers.clamav-freshclam = mkIf cfg.updater.enable {
       description = "Timer for ClamAV virus database updater (freshclam)";
       wantedBy = [ "timers.target" ];
       timerConfig = {
@@ -125,7 +125,7 @@ in
       };
     };
 
-    systemd.services.clamav-freshclam = optionalAttrs cfg.updater.enable {
+    systemd.services.clamav-freshclam = mkIf cfg.updater.enable {
       description = "ClamAV virus database updater (freshclam)";
       restartTriggers = [ freshclamConfigFile ];
 
@@ -137,6 +137,7 @@ in
       serviceConfig = {
         Type = "oneshot";
         ExecStart = "${pkg}/bin/freshclam";
+        SuccessExitStatus = "1"; # if databases are up to date
         PrivateTmp = "yes";
         PrivateDevices = "yes";
       };
diff --git a/nixos/modules/services/system/saslauthd.nix b/nixos/modules/services/system/saslauthd.nix
index c8ddca9a0db6..8fcf4fb91fc4 100644
--- a/nixos/modules/services/system/saslauthd.nix
+++ b/nixos/modules/services/system/saslauthd.nix
@@ -16,7 +16,7 @@ in
 
     services.saslauthd = {
 
-      enable = mkEnableOption "Whether to enable the Cyrus SASL authentication daemon.";
+      enable = mkEnableOption "saslauthd, the Cyrus SASL authentication daemon";
 
       package = mkOption {
         default = pkgs.cyrus_sasl.bin;
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
new file mode 100644
index 000000000000..db4c8e1a3d85
--- /dev/null
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -0,0 +1,474 @@
+{ config, lib, pkgs, ... }@args:
+
+with lib;
+
+let
+  cfg = config.services.nextcloud;
+
+  toKeyValue = generators.toKeyValue {
+    mkKeyValue = generators.mkKeyValueDefault {} " = ";
+  };
+
+  phpOptionsExtensions = ''
+    ${optionalString cfg.caching.apcu "extension=${cfg.phpPackages.apcu}/lib/php/extensions/apcu.so"}
+    ${optionalString cfg.caching.redis "extension=${cfg.phpPackages.redis}/lib/php/extensions/redis.so"}
+    ${optionalString cfg.caching.memcached "extension=${cfg.phpPackages.memcached}/lib/php/extensions/memcached.so"}
+    zend_extension = opcache.so
+    opcache.enable = 1
+  '';
+  phpOptions = {
+    upload_max_filesize = cfg.maxUploadSize;
+    post_max_size = cfg.maxUploadSize;
+    memory_limit = cfg.maxUploadSize;
+  } // cfg.phpOptions;
+  phpOptionsStr = phpOptionsExtensions + (toKeyValue phpOptions);
+
+  occ = pkgs.writeScriptBin "nextcloud-occ" ''
+    #! ${pkgs.stdenv.shell}
+    cd ${pkgs.nextcloud}
+    exec /run/wrappers/bin/sudo -u nextcloud \
+      NEXTCLOUD_CONFIG_DIR="${cfg.home}/config" \
+      ${config.services.phpfpm.phpPackage}/bin/php \
+      -c ${pkgs.writeText "php.ini" phpOptionsStr}\
+      occ $*
+  '';
+
+in {
+  options.services.nextcloud = {
+    enable = mkEnableOption "nextcloud";
+    hostName = mkOption {
+      type = types.str;
+      description = "FQDN for the nextcloud instance.";
+    };
+    home = mkOption {
+      type = types.str;
+      default = "/var/lib/nextcloud";
+      description = "Storage path of nextcloud.";
+    };
+    https = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Enable if there is a TLS terminating proxy in front of nextcloud.";
+    };
+
+    maxUploadSize = mkOption {
+      default = "512M";
+      type = types.str;
+      description = ''
+        Defines the upload limit for files. This changes the relevant options
+        in php.ini and nginx if enabled.
+      '';
+    };
+
+    skeletonDirectory = mkOption {
+      default = "";
+      type = types.str;
+      description = ''
+        The directory where the skeleton files are located. These files will be
+        copied to the data directory of new users. Leave empty to not copy any
+        skeleton files.
+      '';
+    };
+
+    nginx.enable = mkEnableOption "nginx vhost management";
+
+    webfinger = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable this option if you plan on using the webfinger plugin.
+        The appropriate nginx rewrite rules will be added to your configuration.
+      '';
+    };
+
+    phpPackages = mkOption {
+      type = types.attrs;
+      default = pkgs.php71Packages;
+      defaultText = "pkgs.php71Packages";
+      description = ''
+        Overridable attribute of the PHP packages set to use.  If any caching
+        module is enabled, it will be taken from here.  Therefore it should
+        match the version of PHP given to
+        <literal>services.phpfpm.phpPackage</literal>.
+      '';
+    };
+
+    phpOptions = mkOption {
+      type = types.attrsOf types.str;
+      default = {
+        "short_open_tag" = "Off";
+        "expose_php" = "Off";
+        "error_reporting" = "E_ALL & ~E_DEPRECATED & ~E_STRICT";
+        "display_errors" = "stderr";
+        "opcache.enable_cli" = "1";
+        "opcache.interned_strings_buffer" = "8";
+        "opcache.max_accelerated_files" = "10000";
+        "opcache.memory_consumption" = "128";
+        "opcache.revalidate_freq" = "1";
+        "opcache.fast_shutdown" = "1";
+        "openssl.cafile" = "/etc/ssl/certs/ca-certificates.crt";
+        "catch_workers_output" = "yes";
+      };
+      description = ''
+        Options for PHP's php.ini file for nextcloud.
+      '';
+    };
+
+    poolConfig = mkOption {
+      type = types.lines;
+      default = ''
+        pm = dynamic
+        pm.max_children = 32
+        pm.start_servers = 2
+        pm.min_spare_servers = 2
+        pm.max_spare_servers = 4
+        pm.max_requests = 500
+      '';
+      description = ''
+        Options for nextcloud's PHP pool. See the documentation on <literal>php-fpm.conf</literal> for details on configuration directives.
+      '';
+    };
+
+    config = {
+      dbtype = mkOption {
+        type = types.enum [ "sqlite" "pgsql" "mysql" ];
+        default = "sqlite";
+        description = "Database type.";
+      };
+      dbname = mkOption {
+        type = types.nullOr types.str;
+        default = "nextcloud";
+        description = "Database name.";
+      };
+      dbuser = mkOption {
+        type = types.nullOr types.str;
+        default = "nextcloud";
+        description = "Database user.";
+      };
+      dbpass = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Database password.  Use <literal>dbpassFile</literal> to avoid this
+          being world-readable in the <literal>/nix/store</literal>.
+        '';
+      };
+      dbpassFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          The full path to a file that contains the database password.
+        '';
+      };
+      dbhost = mkOption {
+        type = types.nullOr types.str;
+        default = "localhost";
+        description = "Database host.";
+      };
+      dbport = mkOption {
+        type = with types; nullOr (either int str);
+        default = null;
+        description = "Database port.";
+      };
+      dbtableprefix = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Table prefix in Nextcloud database.";
+      };
+      adminuser = mkOption {
+        type = types.str;
+        default = "root";
+        description = "Admin username.";
+      };
+      adminpass = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Admin password.  Use <literal>adminpassFile</literal> to avoid this
+          being world-readable in the <literal>/nix/store</literal>.
+        '';
+      };
+      adminpassFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          The full path to a file that contains the admin's password.
+        '';
+      };
+
+      extraTrustedDomains = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Trusted domains from which the nextcloud installation will be
+          accessible.  You don't need to add
+          <literal>services.nextcloud.hostName</literal> here.
+        '';
+      };
+    };
+
+    caching = {
+      apcu = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Whether to load the APCu module into PHP.
+        '';
+      };
+      redis = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to load the Redis module into PHP.
+          You still need to enable Redis in your config.php.
+          See https://docs.nextcloud.com/server/14/admin_manual/configuration_server/caching_configuration.html
+        '';
+      };
+      memcached = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to load the Memcached module into PHP.
+          You still need to enable Memcached in your config.php.
+          See https://docs.nextcloud.com/server/14/admin_manual/configuration_server/caching_configuration.html
+        '';
+      };
+    };
+  };
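+
+  # A minimal configuration sketch using only the options declared above;
+  # the host name and the password file path are placeholders, not defaults:
+  #
+  #   services.nextcloud = {
+  #     enable = true;
+  #     hostName = "cloud.example.org";
+  #     nginx.enable = true;
+  #     config.adminpassFile = "/etc/nextcloud-admin-pass";
+  #   };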
+
+  config = mkIf cfg.enable (mkMerge [
+    { assertions = let acfg = cfg.config; in [
+        { assertion = !(acfg.dbpass != null && acfg.dbpassFile != null);
+          message = "Please specify no more than one of dbpass or dbpassFile";
+        }
+        { assertion = ((acfg.adminpass != null || acfg.adminpassFile != null)
+            && !(acfg.adminpass != null && acfg.adminpassFile != null));
+          message = "Please specify exactly one of adminpass or adminpassFile";
+        }
+      ];
+    }
+
+    { systemd.timers."nextcloud-cron" = {
+        wantedBy = [ "timers.target" ];
+        timerConfig.OnBootSec = "5m";
+        timerConfig.OnUnitActiveSec = "15m";
+        timerConfig.Unit = "nextcloud-cron.service";
+      };
+
+      systemd.services = {
+        "nextcloud-setup" = let
+          overrideConfig = pkgs.writeText "nextcloud-config.php" ''
+            <?php
+            $CONFIG = [
+              'apps_paths' => [
+                [ 'path' => '${cfg.home}/apps', 'url' => '/apps', 'writable' => false ],
+                [ 'path' => '${cfg.home}/store-apps', 'url' => '/store-apps', 'writable' => true ],
+              ],
+              'datadirectory' => '${cfg.home}/data',
+              'skeletondirectory' => '${cfg.skeletonDirectory}',
+              ${optionalString cfg.caching.apcu "'memcache.local' => '\\OC\\Memcache\\APCu',"}
+              'log_type' => 'syslog',
+            ];
+          '';
+          occInstallCmd = let
+            c = cfg.config;
+            adminpass = if c.adminpassFile != null
+              then ''"$(<"${toString c.adminpassFile}")"''
+              else ''"${toString c.adminpass}"'';
+            dbpass = if c.dbpassFile != null
+              then ''"$(<"${toString c.dbpassFile}")"''
+              else if c.dbpass != null
+              then ''"${toString c.dbpass}"''
+              else null;
+            installFlags = concatStringsSep " \\\n    "
+              (mapAttrsToList (k: v: "${k} ${toString v}") {
+              "--database" = ''"${c.dbtype}"'';
+              # The following attributes are optional depending on the type of
+              # database.  Those that evaluate to null on the left hand side
+              # will be omitted.
+              ${if c.dbname != null then "--database-name" else null} = ''"${c.dbname}"'';
+              ${if c.dbhost != null then "--database-host" else null} = ''"${c.dbhost}"'';
+              ${if c.dbport != null then "--database-port" else null} = ''"${toString c.dbport}"'';
+              ${if c.dbuser != null then "--database-user" else null} = ''"${c.dbuser}"'';
+              ${if (any (x: x != null) [c.dbpass c.dbpassFile])
+                 then "--database-pass" else null} = dbpass;
+              ${if c.dbtableprefix != null
+                then "--database-table-prefix" else null} = ''"${toString c.dbtableprefix}"'';
+              "--admin-user" = ''"${c.adminuser}"'';
+              "--admin-pass" = adminpass;
+              "--data-dir" = ''"${cfg.home}/data"'';
+            });
+          in ''
+            ${occ}/bin/nextcloud-occ maintenance:install \
+                ${installFlags}
+          '';
+          occSetTrustedDomainsCmd = concatStringsSep "\n" (imap0
+            (i: v: ''
+              ${occ}/bin/nextcloud-occ config:system:set trusted_domains \
+                ${toString i} --value="${toString v}"
+            '') ([ cfg.hostName ] ++ cfg.config.extraTrustedDomains));
+
+        in {
+          wantedBy = [ "multi-user.target" ];
+          before = [ "phpfpm-nextcloud.service" ];
+          script = ''
+            chmod og+x ${cfg.home}
+            ln -sf ${pkgs.nextcloud}/apps ${cfg.home}/
+            mkdir -p ${cfg.home}/config ${cfg.home}/data ${cfg.home}/store-apps
+            ln -sf ${overrideConfig} ${cfg.home}/config/override.config.php
+
+            chown -R nextcloud:nginx ${cfg.home}/config ${cfg.home}/data ${cfg.home}/store-apps
+
+            # Do not install if already installed
+            if [[ ! -e ${cfg.home}/config/config.php ]]; then
+              ${occInstallCmd}
+            fi
+
+            ${occ}/bin/nextcloud-occ upgrade
+
+            ${occ}/bin/nextcloud-occ config:system:delete trusted_domains
+            ${occSetTrustedDomainsCmd}
+          '';
+          serviceConfig.Type = "oneshot";
+        };
+        "nextcloud-cron" = {
+          environment.NEXTCLOUD_CONFIG_DIR = "${cfg.home}/config";
+          serviceConfig.Type = "oneshot";
+          serviceConfig.User = "nextcloud";
+          serviceConfig.ExecStart = "${pkgs.php}/bin/php -f ${pkgs.nextcloud}/cron.php";
+        };
+      };
+
+      services.phpfpm = {
+        phpOptions = phpOptionsExtensions;
+        phpPackage = pkgs.php71;
+        pools.nextcloud = let
+          phpAdminValues = (toKeyValue
+            (foldr (a: b: a // b) {}
+              (mapAttrsToList (k: v: { "php_admin_value[${k}]" = v; })
+                phpOptions)));
+        in {
+          listen = "/run/phpfpm/nextcloud";
+          extraConfig = ''
+            listen.owner = nginx
+            listen.group = nginx
+            user = nextcloud
+            group = nginx
+            ${cfg.poolConfig}
+            env[NEXTCLOUD_CONFIG_DIR] = ${cfg.home}/config
+            env[PATH] = /run/wrappers/bin:/nix/var/nix/profiles/default/bin:/run/current-system/sw/bin:/usr/bin:/bin
+            ${phpAdminValues}
+          '';
+        };
+      };
+
+      users.extraUsers.nextcloud = {
+        home = "${cfg.home}";
+        group = "nginx";
+        createHome = true;
+      };
+
+      environment.systemPackages = [ occ ];
+    }
+
+    (mkIf cfg.nginx.enable {
+      services.nginx = {
+        enable = true;
+        virtualHosts = {
+          "${cfg.hostName}" = {
+            root = pkgs.nextcloud;
+            locations = {
+              "= /robots.txt" = {
+                priority = 100;
+                extraConfig = ''
+                  allow all;
+                  log_not_found off;
+                  access_log off;
+                '';
+              };
+              "/" = {
+                priority = 200;
+                extraConfig = "rewrite ^ /index.php$uri;";
+              };
+              "~ ^/store-apps" = {
+                priority = 201;
+                extraConfig = "root ${cfg.home};";
+              };
+              "= /.well-known/carddav" = {
+                priority = 210;
+                extraConfig = "return 301 $scheme://$host/remote.php/dav;";
+              };
+              "= /.well-known/caldav" = {
+                priority = 210;
+                extraConfig = "return 301 $scheme://$host/remote.php/dav;";
+              };
+              "~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/" = {
+                priority = 300;
+                extraConfig = "deny all;";
+              };
+              "~ ^/(?:\\.|autotest|occ|issue|indie|db_|console)" = {
+                priority = 300;
+                extraConfig = "deny all;";
+              };
+              "~ ^/(?:index|remote|public|cron|core/ajax/update|status|ocs/v[12]|updater/.+|ocs-provider/.+)\\.php(?:$|/)" = {
+                priority = 500;
+                extraConfig = ''
+                  include ${pkgs.nginxMainline}/conf/fastcgi.conf;
+                  fastcgi_split_path_info ^(.+\.php)(/.*)$;
+                  fastcgi_param PATH_INFO $fastcgi_path_info;
+                  fastcgi_param HTTPS ${if cfg.https then "on" else "off"};
+                  fastcgi_param modHeadersAvailable true;
+                  fastcgi_param front_controller_active true;
+                  fastcgi_pass unix:/run/phpfpm/nextcloud;
+                  fastcgi_intercept_errors on;
+                  fastcgi_request_buffering off;
+                  fastcgi_read_timeout 120s;
+                '';
+              };
+              "~ ^/(?:updater|ocs-provider)(?:$|/)".extraConfig = ''
+                try_files $uri/ =404;
+                index index.php;
+              '';
+              "~ \\.(?:css|js|woff|svg|gif)$".extraConfig = ''
+                try_files $uri /index.php$uri$is_args$args;
+                add_header Cache-Control "public, max-age=15778463";
+                add_header X-Content-Type-Options nosniff;
+                add_header X-XSS-Protection "1; mode=block";
+                add_header X-Robots-Tag none;
+                add_header X-Download-Options noopen;
+                add_header X-Permitted-Cross-Domain-Policies none;
+                access_log off;
+              '';
+              "~ \\.(?:png|html|ttf|ico|jpg|jpeg)$".extraConfig = ''
+                try_files $uri /index.php$uri$is_args$args;
+                access_log off;
+              '';
+            };
+            extraConfig = ''
+              add_header X-Content-Type-Options nosniff;
+              add_header X-XSS-Protection "1; mode=block";
+              add_header X-Robots-Tag none;
+              add_header X-Download-Options noopen;
+              add_header X-Permitted-Cross-Domain-Policies none;
+              error_page 403 /core/templates/403.php;
+              error_page 404 /core/templates/404.php;
+              client_max_body_size ${cfg.maxUploadSize};
+              fastcgi_buffers 64 4K;
+              gzip on;
+              gzip_vary on;
+              gzip_comp_level 4;
+              gzip_min_length 256;
+              gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
+              gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
+
+              ${optionalString cfg.webfinger ''
+                rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
+                rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json last;
+              ''}
+            '';
+          };
+        };
+      };
+    })
+  ]);
+}
diff --git a/nixos/modules/services/web-apps/tt-rss.nix b/nixos/modules/services/web-apps/tt-rss.nix
index 2b171aa1b2b2..90b35d19ea11 100644
--- a/nixos/modules/services/web-apps/tt-rss.nix
+++ b/nixos/modules/services/web-apps/tt-rss.nix
@@ -624,7 +624,11 @@ let
     };
 
     users = optionalAttrs (cfg.user == "tt_rss") {
-      users.tt_rss.group = "tt_rss";
+      users.tt_rss = {
+        description = "tt-rss service user";
+        isSystemUser = true;
+        group = "tt_rss";
+      };
       groups.tt_rss = {};
     };
   };
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index b231ee5a3f01..508398f03ace 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -245,8 +245,8 @@ let
         }
       ''
   ) virtualHosts);
-  mkLocations = locations: concatStringsSep "\n" (mapAttrsToList (location: config: ''
-    location ${location} {
+  mkLocations = locations: concatStringsSep "\n" (map (config: ''
+    location ${config.location} {
       ${optionalString (config.proxyPass != null && !cfg.proxyResolveWhileRunning)
         "proxy_pass ${config.proxyPass};"
       }
@@ -266,7 +266,18 @@ let
       ${config.extraConfig}
       ${optionalString (config.proxyPass != null && cfg.recommendedProxySettings) "include ${recommendedProxyConfig};"}
     }
-  '') locations);
+  '') (sortProperties (mapAttrsToList (k: v: v // { location = k; }) locations)));
+  mkBasicAuth = vhostName: authDef: let
+    htpasswdFile = pkgs.writeText "${vhostName}.htpasswd" (
+      concatStringsSep "\n" (mapAttrsToList (user: password: ''
+        ${user}:{PLAIN}${password}
+      '') authDef)
+    );
+  in ''
+    auth_basic secured;
+    auth_basic_user_file ${htpasswdFile};
+  '';
+
   mkHtpasswd = vhostName: authDef: pkgs.writeText "${vhostName}.htpasswd" (
     concatStringsSep "\n" (mapAttrsToList (user: password: ''
       ${user}:{PLAIN}${password}
diff --git a/nixos/modules/services/web-servers/nginx/location-options.nix b/nixos/modules/services/web-servers/nginx/location-options.nix
index 4c772734a749..9b44433d3845 100644
--- a/nixos/modules/services/web-servers/nginx/location-options.nix
+++ b/nixos/modules/services/web-servers/nginx/location-options.nix
@@ -71,6 +71,16 @@ with lib;
         These lines go to the end of the location verbatim.
       '';
     };
+
+    priority = mkOption {
+      type = types.int;
+      default = 1000;
+      description = ''
+        Order of this location block in relation to the others in the vhost.
+        The semantics are the same as with `lib.mkOrder`. Smaller values have
+        a greater priority.
+      '';
+    };
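+
+    # Hypothetical usage sketch: a vhost's locations can use this to control
+    # emission order, e.g.
+    #   locations."= /robots.txt".priority = 100;
+    # so that block is rendered before the default catch-all "/" location.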
   };
 }
 
diff --git a/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixos/modules/services/x11/desktop-managers/gnome3.nix
index eb86f7b53bb6..0d5b860d4617 100644
--- a/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -133,7 +133,6 @@ in {
 
     fonts.fonts = [ pkgs.dejavu_fonts pkgs.cantarell-fonts ];
 
-    services.xserver.displayManager.gdm.enable = mkDefault true;
     services.xserver.displayManager.extraSessionFilePackages = [ pkgs.gnome3.gnome-session ];
 
     services.xserver.displayManager.sessionCommands = ''
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index e759f69db897..704cc78c1528 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -64,7 +64,7 @@ in
       };
 
       security.wrappers = {
-        kcheckpass.source = "${lib.getBin plasma5.plasma-workspace}/lib/libexec/kcheckpass";
+        kcheckpass.source = "${lib.getBin plasma5.kscreenlocker}/lib/libexec/kcheckpass";
         "start_kdeinit".source = "${lib.getBin pkgs.kinit}/lib/libexec/kf5/start_kdeinit";
         kwin_wayland = {
           source = "${lib.getBin plasma5.kwin}/bin/kwin_wayland";
@@ -185,10 +185,8 @@ in
         target = "X11/xkb";
       };
 
-      environment.variables = {
-        # Enable GTK applications to load SVG icons
-        GDK_PIXBUF_MODULE_FILE = "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache";
-      };
+      # Enable GTK applications to load SVG icons
+      services.xserver.gdk-pixbuf.modulePackages = [ pkgs.librsvg ];
 
       fonts.fonts = with pkgs; [ noto-fonts hack-font ];
       fonts.fontconfig.defaultFonts = {
@@ -225,11 +223,8 @@ in
       security.pam.services.sddm.enableKwallet = true;
       security.pam.services.slim.enableKwallet = true;
 
-      # Update the start menu for each user that has `isNormalUser` set.
-      system.activationScripts.plasmaSetup = stringAfter [ "users" "groups" ]
-        (concatStringsSep "\n"
-          (mapAttrsToList (name: value: "${pkgs.su}/bin/su ${name} -c ${pkgs.libsForQt5.kservice}/bin/kbuildsycoca5")
-            (filterAttrs (n: v: v.isNormalUser) config.users.users)));
+      # Update the start menu for each user that is currently logged in
+      system.userActivationScripts.plasmaSetup = "${pkgs.libsForQt5.kservice}/bin/kbuildsycoca5";
     })
   ];
 
diff --git a/nixos/modules/services/x11/desktop-managers/xfce.nix b/nixos/modules/services/x11/desktop-managers/xfce.nix
index 75b9a76e1924..dabf09418da7 100644
--- a/nixos/modules/services/x11/desktop-managers/xfce.nix
+++ b/nixos/modules/services/x11/desktop-managers/xfce.nix
@@ -101,10 +101,11 @@ in
     ];
 
     environment.variables = {
-      GDK_PIXBUF_MODULE_FILE = "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache";
       GIO_EXTRA_MODULES = [ "${pkgs.xfce.gvfs}/lib/gio/modules" ];
     };
 
+    services.xserver.gdk-pixbuf.modulePackages = [ pkgs.librsvg ];
+
     services.xserver.desktopManager.session = [{
       name = "xfce";
       bgSupport = true;
diff --git a/nixos/modules/services/x11/display-managers/default.nix b/nixos/modules/services/x11/display-managers/default.nix
index 357fa8ce8f36..26b79730dd38 100644
--- a/nixos/modules/services/x11/display-managers/default.nix
+++ b/nixos/modules/services/x11/display-managers/default.nix
@@ -222,6 +222,17 @@ in
         description = "List of arguments for the X server.";
       };
 
+      setupCommands = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Shell commands executed just after the X server has started.
+
+          This option is only effective for display managers for which this feature
+          is supported; currently these are LightDM, GDM and SDDM.
+        '';
+      };
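+
+      # A hedged example of how this hook might be used; the xrandr call is
+      # shown only as an illustration:
+      #   services.xserver.displayManager.setupCommands = ''
+      #     ${pkgs.xorg.xrandr}/bin/xrandr --auto
+      #   '';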
+
       sessionCommands = mkOption {
         type = types.lines;
         default = "";
diff --git a/nixos/modules/services/x11/display-managers/gdm.nix b/nixos/modules/services/x11/display-managers/gdm.nix
index a775dd0f0e04..6cc30b218f4a 100644
--- a/nixos/modules/services/x11/display-managers/gdm.nix
+++ b/nixos/modules/services/x11/display-managers/gdm.nix
@@ -7,6 +7,13 @@ let
   cfg = config.services.xserver.displayManager;
   gdm = pkgs.gnome3.gdm;
 
+  xSessionWrapper = if (cfg.setupCommands == "") then null else
+    pkgs.writeScript "gdm-x-session-wrapper" ''
+      #!${pkgs.bash}/bin/bash
+      ${cfg.setupCommands}
+      exec "$@"
+    '';
+
 in
 
 {
@@ -112,6 +119,11 @@ in
           GDM_SESSIONS_DIR = "${cfg.session.desktops}/share/xsessions";
           # Find the mouse
           XCURSOR_PATH = "~/.icons:${pkgs.gnome3.adwaita-icon-theme}/share/icons";
+        } // optionalAttrs (xSessionWrapper != null) {
+          # Make GDM use this wrapper before running the session, which runs the
+          # configured setupCommands. This relies on a patched GDM which supports
+          # this environment variable.
+          GDM_X_SESSION_WRAPPER = "${xSessionWrapper}";
         };
         execCmd = "exec ${gdm}/bin/gdm";
       };
@@ -142,7 +154,10 @@ in
 
     systemd.user.services.dbus.wantedBy = [ "default.target" ];
 
-    programs.dconf.profiles.gdm = "${gdm}/share/dconf/profile/gdm";
+    programs.dconf.profiles.gdm = pkgs.writeText "dconf-gdm-profile" ''
+      system-db:local
+      ${gdm}/share/dconf/profile/gdm
+    '';
 
     # Use AutomaticLogin if delay is zero, because it's immediate.
     # Otherwise with TimedLogin with zero seconds the prompt is still
diff --git a/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix b/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix
new file mode 100644
index 000000000000..7c794b1ba175
--- /dev/null
+++ b/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix
@@ -0,0 +1,159 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  dmcfg = config.services.xserver.displayManager;
+  ldmcfg = dmcfg.lightdm;
+  cfg = ldmcfg.greeters.enso;
+
+  theme = cfg.theme.package;
+  icons = cfg.iconTheme.package;
+  cursors = cfg.cursorTheme.package;
+
+  # We need a few things in the environment for the greeter to run with
+  # fonts/icons.
+  wrappedEnsoGreeter = pkgs.runCommand "lightdm-enso-os-greeter"
+    { buildInputs = [ pkgs.makeWrapper ]; }
+    ''
+      # This wrapper ensures that we actually get themes
+      makeWrapper ${pkgs.lightdm-enso-os-greeter}/bin/pantheon-greeter \
+        $out/greeter \
+        --prefix PATH : "${pkgs.glibc.bin}/bin" \
+        --set GDK_PIXBUF_MODULE_FILE "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache" \
+        --set GTK_PATH "${theme}:${pkgs.gtk3.out}" \
+        --set GTK_EXE_PREFIX "${theme}" \
+        --set GTK_DATA_PREFIX "${theme}" \
+        --set XDG_DATA_DIRS "${theme}/share:${icons}/share:${cursors}/share" \
+        --set XDG_CONFIG_HOME "${theme}/share"
+
+      cat - > $out/lightdm-enso-os-greeter.desktop << EOF
+      [Desktop Entry]
+      Name=LightDM Greeter
+      Comment=This runs the LightDM Greeter
+      Exec=$out/greeter
+      Type=Application
+      EOF
+    '';
+
+  ensoGreeterConf = pkgs.writeText "lightdm-enso-os-greeter.conf" ''
+    [greeter]
+    default-wallpaper=${ldmcfg.background}
+    gtk-theme=${cfg.theme.name}
+    icon-theme=${cfg.iconTheme.name}
+    cursor-theme=${cfg.cursorTheme.name}
+    blur=${toString cfg.blur}
+    brightness=${toString cfg.brightness}
+    ${cfg.extraConfig}
+  '';
+in {
+  options = {
+    services.xserver.displayManager.lightdm.greeters.enso = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable enso-os-greeter as the lightdm greeter
+        '';
+      };
+
+      theme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.gnome3.gnome-themes-extra;
+          defaultText = "pkgs.gnome3.gnome-themes-extra";
+          description = ''
+            The package path that contains the theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "Adwaita";
+          description = ''
+            Name of the theme to use for the lightdm-enso-os-greeter
+          '';
+        };
+      };
+
+      iconTheme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.papirus-icon-theme;
+          defaultText = "pkgs.papirus-icon-theme";
+          description = ''
+            The package path that contains the icon theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "ePapirus";
+          description = ''
+            Name of the icon theme to use for the lightdm-enso-os-greeter
+          '';
+        };
+      };
+
+      cursorTheme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.capitaine-cursors;
+          defaultText = "pkgs.capitaine-cursors";
+          description = ''
+            The package path that contains the cursor theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "capitane-cursors";
+          description = ''
+            Name of the cursor theme to use for the lightdm-enso-os-greeter
+          '';
+        };
+      };
+
+      blur = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether or not to enable blur
+        '';
+      };
+
+      brightness = mkOption {
+        type = types.int;
+        default = 7;
+        description = ''
+          Brightness of the greeter background
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra configuration that should be put in the greeter.conf
+          configuration file
+        '';
+      };
+    };
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+    environment.etc."lightdm/greeter.conf".source = ensoGreeterConf;
+
+    services.xserver.displayManager.lightdm = {
+      greeter = mkDefault {
+        package = wrappedEnsoGreeter;
+        name = "lightdm-enso-os-greeter";
+      };
+
+      greeters = {
+        gtk = {
+          enable = mkDefault false;
+        };
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index 8078b93a7574..a685dbfff2a0 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -46,6 +46,7 @@ let
         greeters-directory = ${cfg.greeter.package}
       ''}
       sessions-directory = ${dmcfg.session.desktops}/share/xsessions
+      ${cfg.extraConfig}
 
       [Seat:*]
       xserver-command = ${xserverWrapper}
@@ -61,6 +62,12 @@ let
       ${optionalString hasDefaultUserSession ''
         user-session=${defaultSessionName}
       ''}
+      ${optionalString (dmcfg.setupCommands != "") ''
+        display-setup-script=${pkgs.writeScript "lightdm-display-setup" ''
+          #!${pkgs.bash}/bin/bash
+          ${dmcfg.setupCommands}
+        ''}
+      ''}
       ${cfg.extraSeatDefaults}
     '';
 
@@ -73,6 +80,7 @@ in
   imports = [
     ./lightdm-greeters/gtk.nix
     ./lightdm-greeters/mini.nix
+    ./lightdm-greeters/enso-os.nix
   ];
 
   options = {
@@ -113,6 +121,15 @@ in
         };
       };
 
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          user-authority-in-system-dir = true
+        '';
+        description = "Extra lines to append to LightDM section.";
+      };
+
       background = mkOption {
         type = types.str;
         default = "${pkgs.nixos-artwork.wallpapers.simple-dark-gray-bottom}/share/artwork/gnome/nix-wallpaper-simple-dark-gray_bottom.png";
diff --git a/nixos/modules/services/x11/display-managers/sddm.nix b/nixos/modules/services/x11/display-managers/sddm.nix
index 2a9826177737..522a0dc92d6f 100644
--- a/nixos/modules/services/x11/display-managers/sddm.nix
+++ b/nixos/modules/services/x11/display-managers/sddm.nix
@@ -20,6 +20,7 @@ let
   Xsetup = pkgs.writeScript "Xsetup" ''
     #!/bin/sh
     ${cfg.setupScript}
+    ${dmcfg.setupCommands}
   '';
 
   Xstop = pkgs.writeScript "Xstop" ''
@@ -137,7 +138,8 @@ in
           xrandr --auto
         '';
         description = ''
-          A script to execute when starting the display server.
+          A script to execute when starting the display server. DEPRECATED, please
+          use <option>services.xserver.displayManager.setupCommands</option>.
         '';
       };
 
diff --git a/nixos/modules/services/x11/display-managers/startx.nix b/nixos/modules/services/x11/display-managers/startx.nix
new file mode 100644
index 000000000000..15609540a6e7
--- /dev/null
+++ b/nixos/modules/services/x11/display-managers/startx.nix
@@ -0,0 +1,44 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.displayManager.startx;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.xserver.displayManager.startx = {
+      enable = mkOption {
+        default = false;
+        description = ''
+          Whether to enable the dummy "startx" pseudo-display manager,
+          which allows users to start X manually via the "startx" command
+          from a vt shell. The X server runs under the user's id, not as root.
+          The user must provide a ~/.xinitrc file containing session startup
+          commands; see startx(1). This file is not automatically generated
+          from the desktopManager and windowManager settings.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    services.xserver = {
+      exportConfiguration = true;
+      displayManager.job.execCmd = "";
+      displayManager.lightdm.enable = lib.mkForce false;
+    };
+    systemd.services.display-manager.enable = false;
+    environment.systemPackages =  with pkgs; [ xorg.xinit ];
+  };
+
+}
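+
+# Usage sketch (xterm is only a stand-in for a real session): with
+#   services.xserver.displayManager.startx.enable = true;
+# a user creates their own ~/.xinitrc, for instance containing just
+#   exec xterm
+# and then runs "startx" from a virtual terminal.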
diff --git a/nixos/modules/services/x11/gdk-pixbuf.nix b/nixos/modules/services/x11/gdk-pixbuf.nix
new file mode 100644
index 000000000000..58faa8e2f9df
--- /dev/null
+++ b/nixos/modules/services/x11/gdk-pixbuf.nix
@@ -0,0 +1,45 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.gdk-pixbuf;
+
+  # Get packages to generate the cache for. We always include gdk_pixbuf.
+  effectivePackages = unique ([pkgs.gdk_pixbuf] ++ cfg.modulePackages);
+
+  # Generate the cache file by running gdk-pixbuf-query-loaders for each
+  # package and concatenating the results.
+  loadersCache = pkgs.runCommand "gdk-pixbuf-loaders.cache" {} ''
+    (
+      for package in ${concatStringsSep " " effectivePackages}; do
+        module_dir="$package/${pkgs.gdk_pixbuf.moduleDir}"
+        if [[ ! -d $module_dir ]]; then
+          echo "Warning (services.xserver.gdk-pixbuf): missing module directory $module_dir" 1>&2
+          continue
+        fi
+        GDK_PIXBUF_MODULEDIR="$module_dir" \
+          ${pkgs.gdk_pixbuf.dev}/bin/gdk-pixbuf-query-loaders
+      done
+    ) > "$out"
+  '';
+in
+
+{
+  options = {
+    services.xserver.gdk-pixbuf.modulePackages = mkOption {
+      type = types.listOf types.package;
+      default = [ ];
+      description = "Packages providing GDK-Pixbuf modules, for cache generation.";
+    };
+  };
+
+  # If there is any package configured in modulePackages, we generate the
+  # loaders.cache based on that and set the environment variable
+  # GDK_PIXBUF_MODULE_FILE to point to it.
+  config = mkIf (cfg.modulePackages != []) {
+    environment.variables = {
+      GDK_PIXBUF_MODULE_FILE = "${loadersCache}";
+    };
+  };
+}
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 75bfeaac1fa3..297e36311656 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -374,6 +374,12 @@ in
         description = "Contents of the first Monitor section of the X server configuration file.";
       };
 
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = "Additional contents (sections) included in the X server configuration file";
+      };
+
       xrandrHeads = mkOption {
         default = [];
         example = [
@@ -754,6 +760,7 @@ in
             Driver "${driver.driverName or driver.name}"
             ${if cfg.useGlamor then ''Option "AccelMethod" "glamor"'' else ""}
             ${cfg.deviceSection}
+            ${driver.deviceSection or ""}
             ${xrandrDeviceSection}
           EndSection
 
@@ -765,6 +772,7 @@ in
             ''}
 
             ${cfg.screenSection}
+            ${driver.screenSection or ""}
 
             ${optionalString (cfg.defaultDepth != 0) ''
               DefaultDepth ${toString cfg.defaultDepth}
@@ -794,6 +802,8 @@ in
         '')}
 
         ${xrandrMonitorSections}
+
+        ${cfg.extraConfig}
       '';
 
     fonts.enableDefaultFonts = mkDefault true;