author     Alyssa Ross <hi@alyssa.is>  2021-04-28 14:39:00 +0000
committer  Alyssa Ross <hi@alyssa.is>  2021-06-10 08:52:36 +0000
commit     693e64ef7421374338ddb1dc12b9573feec75972 (patch)
tree       2526ac075d248699c35d63e04499890ee4381f5f /nixpkgs/nixos/modules/services
parent     7014df2256694d97093d6f2bb1db340d346dea88 (diff)
parent     8e4fe32876ca15e3d5eb3ecd3ca0b224417f5f17 (diff)
Merge commit '8e4fe32876ca15e3d5eb3ecd3ca0b224417f5f17'
Diffstat (limited to 'nixpkgs/nixos/modules/services')
-rw-r--r--  nixpkgs/nixos/modules/services/backup/borgbackup.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/k3s/default.nix | 7
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/github-runner.nix | 299
-rw-r--r--  nixpkgs/nixos/modules/services/databases/pgmanage.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/databases/postgresql.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/services/databases/redis.nix | 62
-rw-r--r--  nixpkgs/nixos/modules/services/games/quake3-server.nix | 111
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/pcscd.nix | 71
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/sane.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/mail/dovecot.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/mail/exim.nix | 9
-rw-r--r--  nixpkgs/nixos/modules/services/mail/mailman.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/services/mail/nullmailer.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/mail/spamassassin.nix | 65
-rw-r--r--  nixpkgs/nixos/modules/services/misc/bazarr.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/misc/etebase-server.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/services/misc/home-assistant.nix | 20
-rw-r--r--  nixpkgs/nixos/modules/services/misc/jellyfin.nix | 22
-rw-r--r--  nixpkgs/nixos/modules/services/misc/matrix-appservice-irc.nix | 3
-rw-r--r--  nixpkgs/nixos/modules/services/misc/matrix-synapse.xml | 6
-rw-r--r--  nixpkgs/nixos/modules/services/misc/nix-daemon.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/misc/podgrab.nix | 50
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/nagios.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/tuptime.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/vnstat.nix | 28
-rw-r--r--  nixpkgs/nixos/modules/services/network-filesystems/ipfs.nix | 32
-rw-r--r--  nixpkgs/nixos/modules/services/networking/babeld.nix | 18
-rw-r--r--  nixpkgs/nixos/modules/services/networking/bird.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/networking/dnsdist.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/doh-proxy-rust.nix | 60
-rw-r--r--  nixpkgs/nixos/modules/services/networking/gobgpd.nix | 64
-rw-r--r--  nixpkgs/nixos/modules/services/networking/iscsi/initiator.nix | 84
-rw-r--r--  nixpkgs/nixos/modules/services/networking/iscsi/root-initiator.nix | 181
-rw-r--r--  nixpkgs/nixos/modules/services/networking/iscsi/target.nix | 53
-rw-r--r--  nixpkgs/nixos/modules/services/networking/mxisd.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/networking/ncdns.nix | 6
-rw-r--r--  nixpkgs/nixos/modules/services/networking/nebula.nix | 219
-rw-r--r--  nixpkgs/nixos/modules/services/networking/networkmanager.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/pixiecore.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/networking/pleroma.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/networking/privoxy.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/quagga.nix | 185
-rw-r--r--  nixpkgs/nixos/modules/services/networking/spacecookie.nix | 161
-rw-r--r--  nixpkgs/nixos/modules/services/security/fprintd.nix | 30
-rw-r--r--  nixpkgs/nixos/modules/services/security/oauth2_proxy.nix | 6
-rw-r--r--  nixpkgs/nixos/modules/services/security/privacyidea.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/security/sshguard.nix | 32
-rw-r--r--  nixpkgs/nixos/modules/services/security/step-ca.nix | 134
-rw-r--r--  nixpkgs/nixos/modules/services/ttys/getty.nix | 20
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/discourse.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/mastodon.nix | 13
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/nextcloud.nix | 16
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/minio.nix | 11
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/nginx/default.nix | 24
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/nginx/vhost-options.nix | 13
-rw-r--r--  nixpkgs/nixos/modules/services/x11/window-managers/default.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/x11/window-managers/e16.nix | 26
-rw-r--r--  nixpkgs/nixos/modules/services/x11/window-managers/wmderland.nix | 61
58 files changed, 1850 insertions, 407 deletions
diff --git a/nixpkgs/nixos/modules/services/backup/borgbackup.nix b/nixpkgs/nixos/modules/services/backup/borgbackup.nix
index be661b201f0d..18fb29fd72a5 100644
--- a/nixpkgs/nixos/modules/services/backup/borgbackup.nix
+++ b/nixpkgs/nixos/modules/services/backup/borgbackup.nix
@@ -169,6 +169,7 @@ let
         (map (mkAuthorizedKey cfg false) cfg.authorizedKeys
         ++ map (mkAuthorizedKey cfg true) cfg.authorizedKeysAppendOnly);
       useDefaultShell = true;
+      isSystemUser = true;
     };
     groups.${cfg.group} = { };
   };
diff --git a/nixpkgs/nixos/modules/services/cluster/k3s/default.nix b/nixpkgs/nixos/modules/services/cluster/k3s/default.nix
index e62fbc94415c..5ab0286a38a8 100644
--- a/nixpkgs/nixos/modules/services/cluster/k3s/default.nix
+++ b/nixpkgs/nixos/modules/services/cluster/k3s/default.nix
@@ -83,7 +83,8 @@ in
 
     systemd.services.k3s = {
       description = "k3s service";
-      after = mkIf cfg.docker [ "docker.service" ];
+      after = [ "network.service" "firewall.service" ] ++ (optional cfg.docker "docker.service");
+      wants = [ "network.service" "firewall.service" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         # See: https://github.com/rancher/k3s/blob/dddbd16305284ae4bd14c0aade892412310d7edc/install.sh#L197
@@ -92,6 +93,10 @@ in
         Delegate = "yes";
         Restart = "always";
         RestartSec = "5s";
+        LimitNOFILE = 1048576;
+        LimitNPROC = "infinity";
+        LimitCORE = "infinity";
+        TasksMax = "infinity";
         ExecStart = concatStringsSep " \\\n " (
           [
             "${cfg.package}/bin/k3s ${cfg.role}"
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/github-runner.nix b/nixpkgs/nixos/modules/services/continuous-integration/github-runner.nix
new file mode 100644
index 000000000000..9627b723f8f1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/github-runner.nix
@@ -0,0 +1,299 @@
+{ config, pkgs, lib, ... }:
+with lib;
+let
+  cfg = config.services.github-runner;
+  svcName = "github-runner";
+  systemdDir = "${svcName}/${cfg.name}";
+  # %t: Runtime directory root (usually /run); see systemd.unit(5)
+  runtimeDir = "%t/${systemdDir}";
+  # %S: State directory root (usually /var/lib); see systemd.unit(5)
+  stateDir = "%S/${systemdDir}";
+  # %L: Log directory root (usually /var/log); see systemd.unit(5)
+  logsDir = "%L/${systemdDir}";
+in
+{
+  options.services.github-runner = {
+    enable = mkOption {
+      default = false;
+      example = true;
+      description = ''
+        Whether to enable GitHub Actions runner.
+
+        Note: GitHub recommends using self-hosted runners with private repositories only. Learn more here:
+        <link xlink:href="https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners"
+        >About self-hosted runners</link>.
+      '';
+      type = lib.types.bool;
+    };
+
+    url = mkOption {
+      type = types.str;
+      description = ''
+        Repository to add the runner to.
+
+        Changing this option triggers a new runner registration.
+      '';
+      example = "https://github.com/nixos/nixpkgs";
+    };
+
+    tokenFile = mkOption {
+      type = types.path;
+      description = ''
+        The full path to a file which contains the runner registration token.
+        The file should contain exactly one line with the token and no trailing newline.
+        The token can be used to re-register a runner of the same name but is time-limited.
+
+        Changing this option or the file's content triggers a new runner registration.
+      '';
+      example = "/run/secrets/github-runner/nixos.token";
+    };
+
+    name = mkOption {
+      # Same pattern as for `networking.hostName`
+      type = types.strMatching "^$|^[[:alnum:]]([[:alnum:]_-]{0,61}[[:alnum:]])?$";
+      description = ''
+        Name of the runner to configure. Defaults to the hostname.
+
+        Changing this option triggers a new runner registration.
+      '';
+      example = "nixos";
+      default = config.networking.hostName;
+    };
+
+    runnerGroup = mkOption {
+      type = types.nullOr types.str;
+      description = ''
+        Name of the runner group to add this runner to (defaults to the default runner group).
+
+        Changing this option triggers a new runner registration.
+      '';
+      default = null;
+    };
+
+    extraLabels = mkOption {
+      type = types.listOf types.str;
+      description = ''
+        Extra labels in addition to the default (<literal>["self-hosted", "Linux", "X64"]</literal>).
+
+        Changing this option triggers a new runner registration.
+      '';
+      example = literalExample ''[ "nixos" ]'';
+      default = [ ];
+    };
+
+    replace = mkOption {
+      type = types.bool;
+      description = ''
+        Replace any existing runner with the same name.
+
+        Without this flag, registering a new runner with the same name fails.
+      '';
+      default = false;
+    };
+
+    extraPackages = mkOption {
+      type = types.listOf types.package;
+      description = ''
+        Extra packages to add to <literal>PATH</literal> of the service to make them available to workflows.
+      '';
+      default = [ ];
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings = optionals (isStorePath cfg.tokenFile) [
+      ''
+        `services.github-runner.tokenFile` points to the Nix store and, therefore, is world-readable.
+        Consider using a path outside of the Nix store to keep the token private.
+      ''
+    ];
+
+    systemd.services.${svcName} = {
+      description = "GitHub Actions runner";
+
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+      after = [ "network.target" "network-online.target" ];
+
+      environment = {
+        HOME = runtimeDir;
+        RUNNER_ROOT = runtimeDir;
+      };
+
+      path = (with pkgs; [
+        bash
+        coreutils
+        git
+        gnutar
+        gzip
+      ]) ++ [
+        config.nix.package
+      ] ++ cfg.extraPackages;
+
+      serviceConfig = rec {
+        ExecStart = "${pkgs.github-runner}/bin/runsvc.sh";
+
+        # Does the following, sequentially:
+        # - Copy the current and the previous `tokenFile` to the $RUNTIME_DIRECTORY
+        #   and make it accessible to the service user to allow for a content
+        #   comparison.
+        # - If the module configuration or the token has changed, clear the state directory.
+        # - Configure the runner.
+        # - Copy the configured `tokenFile` to the $STATE_DIRECTORY and make it
+        #   inaccessible to the service user.
+        # - Set up the directory structure by creating the necessary symlinks.
+        ExecStartPre =
+          let
+            # Wrapper script which expects the full path of the state, runtime and logs
+            # directory as arguments. Overrides the respective systemd variables to provide
+            # unambiguous directory names. This becomes relevant, for example, if the
+            # caller overrides any of the StateDirectory=, RuntimeDirectory= or LogDirectory=
+            # to contain more than one directory. This causes systemd to set the respective
+            # environment variables with the path of all of the given directories, separated
+            # by a colon.
+            writeScript = name: lines: pkgs.writeShellScript "${svcName}-${name}.sh" ''
+              set -euo pipefail
+
+              STATE_DIRECTORY="$1"
+              RUNTIME_DIRECTORY="$2"
+              LOGS_DIRECTORY="$3"
+
+              ${lines}
+            '';
+            currentConfigPath = "$STATE_DIRECTORY/.nixos-current-config.json";
+            runnerRegistrationConfig = getAttrs [ "name" "tokenFile" "url" "runnerGroup" "extraLabels" ] cfg;
+            newConfigPath = builtins.toFile "${svcName}-config.json" (builtins.toJSON runnerRegistrationConfig);
+            currentConfigTokenFilename = ".current-token";
+            newConfigTokenFilename = ".new-token";
+            runnerCredFiles = [
+              ".credentials"
+              ".credentials_rsaparams"
+              ".runner"
+            ];
+            ownConfigTokens = writeScript "own-config-tokens" ''
+              # Copy current and new token file to runtime dir and make it accessible to the service user
+              cp ${escapeShellArg cfg.tokenFile} "$RUNTIME_DIRECTORY/${newConfigTokenFilename}"
+              chmod 600 "$RUNTIME_DIRECTORY/${newConfigTokenFilename}"
+              chown "$USER" "$RUNTIME_DIRECTORY/${newConfigTokenFilename}"
+
+              if [[ -e "$STATE_DIRECTORY/${currentConfigTokenFilename}" ]]; then
+                cp "$STATE_DIRECTORY/${currentConfigTokenFilename}" "$RUNTIME_DIRECTORY/${currentConfigTokenFilename}"
+                chmod 600 "$RUNTIME_DIRECTORY/${currentConfigTokenFilename}"
+                chown "$USER" "$RUNTIME_DIRECTORY/${currentConfigTokenFilename}"
+              fi
+            '';
+            disownConfigTokens = writeScript "disown-config-tokens" ''
+              # Make the token inaccessible to the runner service user
+              chmod 600 "$STATE_DIRECTORY/${currentConfigTokenFilename}"
+              chown root:root "$STATE_DIRECTORY/${currentConfigTokenFilename}"
+            '';
+            unconfigureRunner = writeScript "unconfigure" ''
+              differs=
+              # Set `differs = 1` if current and new runner config differ or if `currentConfigPath` does not exist
+              ${pkgs.diffutils}/bin/diff -q '${newConfigPath}' "${currentConfigPath}" >/dev/null 2>&1 || differs=1
+              # Also trigger a registration if the token content changed
+              ${pkgs.diffutils}/bin/diff -q \
+                "$RUNTIME_DIRECTORY"/{${currentConfigTokenFilename},${newConfigTokenFilename}} \
+                >/dev/null 2>&1 || differs=1
+
+              if [[ -n "$differs" ]]; then
+                echo "Config has changed, removing old runner state."
+                echo "The old runner will still appear in the GitHub Actions UI." \
+                  "You have to remove it manually."
+                find "$STATE_DIRECTORY/" -mindepth 1 -delete
+              fi
+            '';
+            configureRunner = writeScript "configure" ''
+              empty=$(ls -A "$STATE_DIRECTORY")
+              if [[ -z "$empty" ]]; then
+                echo "Configuring GitHub Actions Runner"
+                token=$(< "$RUNTIME_DIRECTORY"/${newConfigTokenFilename})
+                RUNNER_ROOT="$STATE_DIRECTORY" ${pkgs.github-runner}/bin/config.sh \
+                  --unattended \
+                  --work "$RUNTIME_DIRECTORY" \
+                  --url ${escapeShellArg cfg.url} \
+                  --token "$token" \
+                  --labels ${escapeShellArg (concatStringsSep "," cfg.extraLabels)} \
+                  --name ${escapeShellArg cfg.name} \
+                  ${optionalString cfg.replace "--replace"} \
+                  ${optionalString (cfg.runnerGroup != null) "--runnergroup ${escapeShellArg cfg.runnerGroup}"}
+
+                # Move the automatically created _diag dir to the logs dir
+                mkdir -p  "$STATE_DIRECTORY/_diag"
+                cp    -r  "$STATE_DIRECTORY/_diag/." "$LOGS_DIRECTORY/"
+                rm    -rf "$STATE_DIRECTORY/_diag/"
+
+                # Cleanup token from config
+                rm -f "$RUNTIME_DIRECTORY"/${currentConfigTokenFilename}
+                mv    "$RUNTIME_DIRECTORY"/${newConfigTokenFilename} "$STATE_DIRECTORY/${currentConfigTokenFilename}"
+
+                # Symlink to new config
+                ln -s '${newConfigPath}' "${currentConfigPath}"
+              fi
+            '';
+            setupRuntimeDir = writeScript "setup-runtime-dirs" ''
+              # Link _diag dir
+              ln -s "$LOGS_DIRECTORY" "$RUNTIME_DIRECTORY/_diag"
+
+              # Link the runner credentials to the runtime dir
+              ln -s "$STATE_DIRECTORY"/{${lib.concatStringsSep "," runnerCredFiles}} "$RUNTIME_DIRECTORY/"
+            '';
+          in
+          map (x: "${x} ${escapeShellArgs [ stateDir runtimeDir logsDir ]}") [
+            "+${ownConfigTokens}" # runs as root
+            unconfigureRunner
+            configureRunner
+            "+${disownConfigTokens}" # runs as root
+            setupRuntimeDir
+          ];
+
+        # Contains _diag
+        LogsDirectory = [ systemdDir ];
+        # Default RUNNER_ROOT which contains ephemeral Runner data
+        RuntimeDirectory = [ systemdDir ];
+        # Home of persistent runner data, e.g., credentials
+        StateDirectory = [ systemdDir ];
+        StateDirectoryMode = "0700";
+        WorkingDirectory = runtimeDir;
+
+        # By default, use a dynamically allocated user
+        DynamicUser = true;
+
+        KillMode = "process";
+        KillSignal = "SIGTERM";
+
+        # Hardening (may overlap with DynamicUser=)
+        # The following options are only for optimizing:
+        # systemd-analyze security github-runner
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+        # ProtectClock= adds DeviceAllow=char-rtc r
+        DeviceAllow = "";
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        UMask = "0066";
+
+        # Needs network access
+        PrivateNetwork = false;
+        # Cannot be true due to Node
+        MemoryDenyWriteExecute = false;
+      };
+    };
+  };
+}
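
A minimal configuration sketch for the new module, using only options declared above; the URL, token path, and extra package are placeholders:

  { pkgs, ... }:
  {
    services.github-runner = {
      enable = true;
      url = "https://github.com/nixos/nixpkgs";
      # Keep the registration token outside the Nix store (see the warning above).
      tokenFile = "/run/secrets/github-runner.token";
      extraLabels = [ "nixos" ];
      extraPackages = [ pkgs.docker ];   # extra tools exposed on the service PATH
    };
  }
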
diff --git a/nixpkgs/nixos/modules/services/databases/pgmanage.nix b/nixpkgs/nixos/modules/services/databases/pgmanage.nix
index 0f8634dab319..8508e76b5cd6 100644
--- a/nixpkgs/nixos/modules/services/databases/pgmanage.nix
+++ b/nixpkgs/nixos/modules/services/databases/pgmanage.nix
@@ -197,6 +197,7 @@ in {
         group = pgmanage;
         home  = cfg.sqlRoot;
         createHome = true;
+        isSystemUser = true;
       };
       groups.${pgmanage} = {
         name = pgmanage;
diff --git a/nixpkgs/nixos/modules/services/databases/postgresql.nix b/nixpkgs/nixos/modules/services/databases/postgresql.nix
index 900185fbbdf7..fdc05312ece0 100644
--- a/nixpkgs/nixos/modules/services/databases/postgresql.nix
+++ b/nixpkgs/nixos/modules/services/databases/postgresql.nix
@@ -163,7 +163,7 @@ in
               '';
               example = literalExample ''
                 {
-                  "DATABASE nextcloud" = "ALL PRIVILEGES";
+                  "DATABASE \"nextcloud\"" = "ALL PRIVILEGES";
                   "ALL TABLES IN SCHEMA public" = "ALL PRIVILEGES";
                 }
               '';
@@ -295,8 +295,7 @@ in
       # systems!
       mkDefault (if versionAtLeast config.system.stateVersion "20.03" then pkgs.postgresql_11
             else if versionAtLeast config.system.stateVersion "17.09" then pkgs.postgresql_9_6
-            else if versionAtLeast config.system.stateVersion "16.03" then pkgs.postgresql_9_5
-            else throw "postgresql_9_4 was removed, please upgrade your postgresql version.");
+            else throw "postgresql_9_5 was removed, please upgrade your postgresql version.");
 
     services.postgresql.dataDir = mkDefault "/var/lib/postgresql/${cfg.package.psqlSchema}";
 
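
The corrected example matters because PostgreSQL case-folds unquoted identifiers in GRANT statements, while a quoted identifier is taken verbatim. A sketch of the example in context, assuming the usual ensureDatabases/ensureUsers options (names are illustrative):

  {
    services.postgresql = {
      enable = true;
      ensureDatabases = [ "nextcloud" ];
      ensureUsers = [{
        name = "nextcloud";
        # Quoting keeps the database name exact in the generated GRANT statement.
        ensurePermissions."DATABASE \"nextcloud\"" = "ALL PRIVILEGES";
      }];
    };
  }
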
diff --git a/nixpkgs/nixos/modules/services/databases/redis.nix b/nixpkgs/nixos/modules/services/databases/redis.nix
index 117e63662258..7ec10c0eb5ab 100644
--- a/nixpkgs/nixos/modules/services/databases/redis.nix
+++ b/nixpkgs/nixos/modules/services/databases/redis.nix
@@ -5,6 +5,8 @@ with lib;
 let
   cfg = config.services.redis;
 
+  ulimitNofile = cfg.maxclients + 32;
+
   mkValueString = value:
     if value == true then "yes"
     else if value == false then "no"
@@ -14,8 +16,8 @@ let
     listsAsDuplicateKeys = true;
     mkKeyValue = generators.mkKeyValueDefault { inherit mkValueString; } " ";
   } cfg.settings);
-in
-{
+
+in {
   imports = [
     (mkRemovedOptionModule [ "services" "redis" "user" ] "The redis module now is hardcoded to the redis user.")
     (mkRemovedOptionModule [ "services" "redis" "dbpath" ] "The redis module now uses /var/lib/redis as data directory.")
@@ -88,6 +90,13 @@ in
         example = "/run/redis/redis.sock";
       };
 
+      unixSocketPerm = mkOption {
+        type = types.int;
+        default = 750;
+        description = "Change permissions for the socket";
+        example = 700;
+      };
+
       logLevel = mkOption {
         type = types.str;
         default = "notice"; # debug, verbose, notice, warning
@@ -114,6 +123,12 @@ in
         description = "Set the number of databases.";
       };
 
+      maxclients = mkOption {
+        type = types.int;
+        default = 10000;
+        description = "Set the max number of connected clients at the same time.";
+      };
+
       save = mkOption {
         type = with types; listOf (listOf int);
         default = [ [900 1] [300 10] [60 10000] ];
@@ -204,7 +219,6 @@ in
         '';
         example = literalExample ''
           {
-            unixsocketperm = "700";
             loadmodule = [ "/path/to/my_module.so" "/path/to/other_module.so" ];
           }
         '';
@@ -247,6 +261,7 @@ in
         logfile = cfg.logfile;
         syslog-enabled = cfg.syslog;
         databases = cfg.databases;
+        maxclients = cfg.maxclients;
         save = map (d: "${toString (builtins.elemAt d 0)} ${toString (builtins.elemAt d 1)}") cfg.save;
         dbfilename = "dump.rdb";
         dir = "/var/lib/redis";
@@ -256,7 +271,7 @@ in
         slowlog-max-len = cfg.slowLogMaxLen;
       }
       (mkIf (cfg.bind != null) { bind = cfg.bind; })
-      (mkIf (cfg.unixSocket != null) { unixsocket = cfg.unixSocket; })
+      (mkIf (cfg.unixSocket != null) { unixsocket = cfg.unixSocket; unixsocketperm = "${toString cfg.unixSocketPerm}"; })
       (mkIf (cfg.slaveOf != null) { slaveof = "${cfg.slaveOf.ip} ${cfg.slaveOf.port}"; })
       (mkIf (cfg.masterAuth != null) { masterauth = cfg.masterAuth; })
       (mkIf (cfg.requirePass != null) { requirepass = cfg.requirePass; })
@@ -277,11 +292,46 @@ in
 
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/redis-server /run/redis/redis.conf";
-        RuntimeDirectory = "redis";
-        StateDirectory = "redis";
         Type = "notify";
+        # User and group
         User = "redis";
         Group = "redis";
+        # Runtime directory and mode
+        RuntimeDirectory = "redis";
+        RuntimeDirectoryMode = "0750";
+        # State directory and mode
+        StateDirectory = "redis";
+        StateDirectoryMode = "0700";
+        # Access write directories
+        UMask = "0077";
+        # Capabilities
+        CapabilityBoundingSet = "";
+        # Security
+        NoNewPrivileges = true;
+        # Process Properties
+        LimitNOFILE = "${toString ulimitNofile}";
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
+        SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @memlock @module @mount @obsolete @privileged @raw-io @reboot @resources @setuid @swap";
       };
     };
   };
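
A sketch combining the new socket and client-limit options (values are illustrative):

  {
    services.redis = {
      enable = true;
      unixSocket = "/run/redis/redis.sock";
      unixSocketPerm = 770;   # rendered as "unixsocketperm 770" in redis.conf
      maxclients = 20000;     # the unit's LimitNOFILE is derived as maxclients + 32
    };
  }
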
diff --git a/nixpkgs/nixos/modules/services/games/quake3-server.nix b/nixpkgs/nixos/modules/services/games/quake3-server.nix
new file mode 100644
index 000000000000..1dc01260e8fa
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/quake3-server.nix
@@ -0,0 +1,111 @@
+{ config, pkgs, lib, ... }:
+with lib;
+
+let
+  cfg = config.services.quake3-server;
+  configFile = pkgs.writeText "q3ds-extra.cfg" ''
+    set net_port ${builtins.toString cfg.port}
+
+    ${cfg.extraConfig}
+  '';
+  defaultBaseq3 = pkgs.requireFile rec {
+    name = "baseq3";
+    hashMode = "recursive";
+    sha256 = "5dd8ee09eabd45e80450f31d7a8b69b846f59738726929298d8a813ce5725ed3";
+    message = ''
+      Unfortunately, we cannot download ${name} automatically.
+      Please purchase a legitimate copy of Quake 3 and change into the installation directory.
+
+      You can either add all relevant files to the nix-store like this:
+      mkdir /tmp/baseq3
+      cp baseq3/pak*.pk3 /tmp/baseq3
+      nix-store --add-fixed sha256 --recursive /tmp/baseq3
+
+      Alternatively you can set services.quake3-server.baseq3 to a path and copy the baseq3 directory into
+      $services.quake3-server.baseq3/.q3a/
+    '';
+  };
+  home = pkgs.runCommand "quake3-home" {} ''
+      mkdir -p $out/.q3a/baseq3
+
+      for file in ${cfg.baseq3}/*; do
+        ln -s $file $out/.q3a/baseq3/$(basename $file)
+      done
+
+      ln -s ${configFile} $out/.q3a/baseq3/nix.cfg
+  '';
+in {
+  options = {
+    services.quake3-server = {
+      enable = mkEnableOption "Quake 3 dedicated server";
+
+      port = mkOption {
+        type = types.port;
+        default = 27960;
+        description = ''
+          UDP Port the server should listen on.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Open the firewall.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          seta rconPassword "superSecret"      // sets RCON password for remote console
+          seta sv_hostname "My Quake 3 server"      // name that appears in server list
+        '';
+        description = ''
+          Extra configuration options. Note that options changed via RCON will not be persisted. To list all possible
+          options, use "cvarlist 1" via RCON.
+        '';
+      };
+
+      baseq3 = mkOption {
+        type = types.either types.package types.path;
+        default = defaultBaseq3;
+        example = "/var/lib/q3ds";
+        description = ''
+          Path to the baseq3 files (pak*.pk3). If this is in the Nix store (type = package), all .pk3 files should be saved
+          in the top-level directory. If this is on another filesystem (e.g. /var/lib/baseq3), the .pk3 files are searched for in
+          $baseq3/.q3a/baseq3/
+        '';
+      };
+    };
+  };
+
+  config = let
+    baseq3InStore = builtins.typeOf cfg.baseq3 == "set";
+  in mkIf cfg.enable {
+    networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
+    systemd.services.q3ds = {
+      description = "Quake 3 dedicated server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+
+      environment.HOME = if baseq3InStore then home else cfg.baseq3;
+
+      serviceConfig = with lib; {
+        Restart = "always";
+        DynamicUser = true;
+        WorkingDirectory = home;
+
+        # It is possible to alter configuration files via RCON. To ensure reproducibility we have to prevent this
+        ReadOnlyPaths = if baseq3InStore then home else cfg.baseq3;
+        ExecStartPre = optionalString (!baseq3InStore) "+${pkgs.coreutils}/bin/cp ${configFile} ${cfg.baseq3}/.q3a/baseq3/nix.cfg";
+
+        ExecStart = "${pkgs.ioquake3}/ioq3ded.x86_64 +exec nix.cfg";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ f4814n ];
+}
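
A usage sketch for the new module; the baseq3 location and RCON password are placeholders:

  {
    services.quake3-server = {
      enable = true;
      openFirewall = true;        # opens the configured UDP port (27960 by default)
      baseq3 = "/var/lib/q3ds";   # expects pak*.pk3 under /var/lib/q3ds/.q3a/baseq3/
      extraConfig = ''
        seta rconPassword "change-me"
        seta sv_hostname "NixOS Quake 3 server"
      '';
    };
  }
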
diff --git a/nixpkgs/nixos/modules/services/hardware/pcscd.nix b/nixpkgs/nixos/modules/services/hardware/pcscd.nix
index f3fc4c3cc79e..54b6693f85a0 100644
--- a/nixpkgs/nixos/modules/services/hardware/pcscd.nix
+++ b/nixpkgs/nixos/modules/services/hardware/pcscd.nix
@@ -10,39 +10,37 @@ let
     paths = map (p: "${p}/pcsc/drivers") config.services.pcscd.plugins;
   };
 
-in {
+in
+{
 
   ###### interface
 
-  options = {
+  options.services.pcscd = {
+    enable = mkEnableOption "PCSC-Lite daemon";
 
-    services.pcscd = {
-      enable = mkEnableOption "PCSC-Lite daemon";
-
-      plugins = mkOption {
-        type = types.listOf types.package;
-        default = [ pkgs.ccid ];
-        defaultText = "[ pkgs.ccid ]";
-        example = literalExample "[ pkgs.pcsc-cyberjack ]";
-        description = "Plugin packages to be used for PCSC-Lite.";
-      };
-
-      readerConfig = mkOption {
-        type = types.lines;
-        default = "";
-        example = ''
-          FRIENDLYNAME      "Some serial reader"
-          DEVICENAME        /dev/ttyS0
-          LIBPATH           /path/to/serial_reader.so
-          CHANNELID         1
-        '';
-        description = ''
-          Configuration for devices that aren't hotpluggable.
+    plugins = mkOption {
+      type = types.listOf types.package;
+      default = [ pkgs.ccid ];
+      defaultText = "[ pkgs.ccid ]";
+      example = literalExample "[ pkgs.pcsc-cyberjack ]";
+      description = "Plugin packages to be used for PCSC-Lite.";
+    };
 
-          See <citerefentry><refentrytitle>reader.conf</refentrytitle>
-          <manvolnum>5</manvolnum></citerefentry> for valid options.
-        '';
-      };
+    readerConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        FRIENDLYNAME      "Some serial reader"
+        DEVICENAME        /dev/ttyS0
+        LIBPATH           /path/to/serial_reader.so
+        CHANNELID         1
+      '';
+      description = ''
+        Configuration for devices that aren't hotpluggable.
+
+        See <citerefentry><refentrytitle>reader.conf</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> for valid options.
+      '';
     };
   };
 
@@ -50,20 +48,15 @@ in {
 
   config = mkIf config.services.pcscd.enable {
 
-    systemd.sockets.pcscd = {
-      description = "PCSC-Lite Socket";
-      wantedBy = [ "sockets.target" ];
-      before = [ "multi-user.target" ];
-      socketConfig.ListenStream = "/run/pcscd/pcscd.comm";
-    };
+    environment.etc."reader.conf".source = cfgFile;
+
+    systemd.packages = [ (getBin pkgs.pcsclite) ];
+
+    systemd.sockets.pcscd.wantedBy = [ "sockets.target" ];
 
     systemd.services.pcscd = {
-      description = "PCSC-Lite daemon";
       environment.PCSCLITE_HP_DROPDIR = pluginEnv;
-      serviceConfig = {
-        ExecStart = "${getBin pkgs.pcsclite}/sbin/pcscd -f -x -c ${cfgFile}";
-        ExecReload = "${getBin pkgs.pcsclite}/sbin/pcscd -H";
-      };
+      restartTriggers = [ "/etc/reader.conf" ];
     };
   };
 }
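
Usage is unchanged by this refactor to the upstream-provided units; a minimal sketch reusing the option examples above (the extra plugin is an assumption):

  { pkgs, ... }:
  {
    services.pcscd = {
      enable = true;
      plugins = [ pkgs.ccid pkgs.pcsc-cyberjack ];
      # Rendered into /etc/reader.conf for non-hotpluggable readers.
      readerConfig = ''
        FRIENDLYNAME      "Some serial reader"
        DEVICENAME        /dev/ttyS0
        LIBPATH           /path/to/serial_reader.so
        CHANNELID         1
      '';
    };
  }
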
diff --git a/nixpkgs/nixos/modules/services/hardware/sane.nix b/nixpkgs/nixos/modules/services/hardware/sane.nix
index e5a01a8a27d9..8c1bde7b4158 100644
--- a/nixpkgs/nixos/modules/services/hardware/sane.nix
+++ b/nixpkgs/nixos/modules/services/hardware/sane.nix
@@ -163,6 +163,7 @@ in
       users.users.scanner = {
         uid = config.ids.uids.scanner;
         group = "scanner";
+        extraGroups = [ "lp" ] ++ optionals config.services.avahi.enable [ "avahi" ];
       };
     })
   ];
diff --git a/nixpkgs/nixos/modules/services/mail/dovecot.nix b/nixpkgs/nixos/modules/services/mail/dovecot.nix
index a2298152b023..1ccfb357750b 100644
--- a/nixpkgs/nixos/modules/services/mail/dovecot.nix
+++ b/nixpkgs/nixos/modules/services/mail/dovecot.nix
@@ -405,7 +405,7 @@ in
         };
     } // optionalAttrs (cfg.createMailUser && cfg.mailUser != null) {
       ${cfg.mailUser} =
-        { description = "Virtual Mail User"; } // optionalAttrs (cfg.mailGroup != null)
+        { description = "Virtual Mail User"; isSystemUser = true; } // optionalAttrs (cfg.mailGroup != null)
           { group = cfg.mailGroup; };
     };
 
diff --git a/nixpkgs/nixos/modules/services/mail/exim.nix b/nixpkgs/nixos/modules/services/mail/exim.nix
index 892fbd33214a..8927d84b478c 100644
--- a/nixpkgs/nixos/modules/services/mail/exim.nix
+++ b/nixpkgs/nixos/modules/services/mail/exim.nix
@@ -67,6 +67,13 @@ in
         '';
       };
 
+      queueRunnerInterval = mkOption {
+        type = types.str;
+        default = "5m";
+        description = ''
+          How often to spawn a new queue runner.
+        '';
+      };
     };
 
   };
@@ -104,7 +111,7 @@ in
       wantedBy = [ "multi-user.target" ];
       restartTriggers = [ config.environment.etc."exim.conf".source ];
       serviceConfig = {
-        ExecStart   = "${cfg.package}/bin/exim -bdf -q30m";
+        ExecStart   = "${cfg.package}/bin/exim -bdf -q${cfg.queueRunnerInterval}";
         ExecReload  = "${coreutils}/bin/kill -HUP $MAINPID";
       };
       preStart = ''
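
A minimal sketch of the new option (the interval value is illustrative; "5m" remains the default):

  {
    services.exim = {
      enable = true;
      # Passed to the daemon as -q10m: spawn a queue runner every 10 minutes.
      queueRunnerInterval = "10m";
    };
  }
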
diff --git a/nixpkgs/nixos/modules/services/mail/mailman.nix b/nixpkgs/nixos/modules/services/mail/mailman.nix
index 136dff72a34d..64fb30d41567 100644
--- a/nixpkgs/nixos/modules/services/mail/mailman.nix
+++ b/nixpkgs/nixos/modules/services/mail/mailman.nix
@@ -154,7 +154,7 @@ in {
 
         baseUrl = mkOption {
           type = types.str;
-          default = "http://localhost/hyperkitty/";
+          default = "http://localhost:18507/archives/";
           description = ''
             Where can Mailman connect to Hyperkitty's internal API, preferably on
             localhost?
@@ -387,6 +387,7 @@ in {
           plugins = ["python3"];
           home = pkgs.mailman-web;
           module = "mailman_web.wsgi";
+          http = "127.0.0.1:18507";
         };
         uwsgiConfigFile = pkgs.writeText "uwsgi-mailman.json" (builtins.toJSON uwsgiConfig);
       in {
@@ -449,7 +450,7 @@ in {
   };
 
   meta = {
-    maintainers = with lib.maintainers; [ lheckemann ];
+    maintainers = with lib.maintainers; [ lheckemann qyliss ];
     doc = ./mailman.xml;
   };
 
diff --git a/nixpkgs/nixos/modules/services/mail/nullmailer.nix b/nixpkgs/nixos/modules/services/mail/nullmailer.nix
index fe3f8ef9b391..09874ca0ed75 100644
--- a/nixpkgs/nixos/modules/services/mail/nullmailer.nix
+++ b/nixpkgs/nixos/modules/services/mail/nullmailer.nix
@@ -204,6 +204,7 @@ with lib;
       users.${cfg.user} = {
         description = "Nullmailer relay-only mta user";
         group = cfg.group;
+        isSystemUser = true;
       };
 
       groups.${cfg.group} = { };
diff --git a/nixpkgs/nixos/modules/services/mail/spamassassin.nix b/nixpkgs/nixos/modules/services/mail/spamassassin.nix
index 4e642542ec66..ac878222b26a 100644
--- a/nixpkgs/nixos/modules/services/mail/spamassassin.nix
+++ b/nixpkgs/nixos/modules/services/mail/spamassassin.nix
@@ -126,19 +126,36 @@ in
     };
 
     systemd.services.sa-update = {
+      # Needs to be able to contact the update server.
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "spamd";
+        Group = "spamd";
+        StateDirectory = "spamassassin";
+        ExecStartPost = "+${pkgs.systemd}/bin/systemctl -q --no-block try-reload-or-restart spamd.service";
+      };
+
       script = ''
         set +e
-        ${pkgs.su}/bin/su -s "${pkgs.bash}/bin/bash" -c "${pkgs.spamassassin}/bin/sa-update --gpghomedir=/var/lib/spamassassin/sa-update-keys/" spamd
-
-        v=$?
+        ${pkgs.spamassassin}/bin/sa-update --verbose --gpghomedir=/var/lib/spamassassin/sa-update-keys/
+        rc=$?
         set -e
-        if [ $v -gt 1 ]; then
-          echo "sa-update execution error"
-          exit $v
+
+        if [[ $rc -gt 1 ]]; then
+          # sa-update failed.
+          exit $rc
         fi
-        if [ $v -eq 0 ]; then
-          systemctl reload spamd.service
+
+        if [[ $rc -eq 1 ]]; then
+          # No update was available, exit successfully.
+          exit 0
         fi
+
+        # An update was available and installed. Compile the rules.
+        ${pkgs.spamassassin}/bin/sa-compile
       '';
     };
 
@@ -153,32 +170,22 @@ in
     };
 
     systemd.services.spamd = {
-      description = "Spam Assassin Server";
+      description = "SpamAssassin Server";
 
       wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" ];
+      wants = [ "sa-update.service" ];
+      after = [
+        "network.target"
+        "sa-update.service"
+      ];
 
       serviceConfig = {
-        ExecStart = "${pkgs.spamassassin}/bin/spamd ${optionalString cfg.debug "-D"} --username=spamd --groupname=spamd --virtual-config-dir=/var/lib/spamassassin/user-%u --allow-tell --pidfile=/run/spamd.pid";
-        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        User = "spamd";
+        Group = "spamd";
+        ExecStart = "+${pkgs.spamassassin}/bin/spamd ${optionalString cfg.debug "-D"} --username=spamd --groupname=spamd --virtual-config-dir=%S/spamassassin/user-%u --allow-tell --pidfile=/run/spamd.pid";
+        ExecReload = "+${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        StateDirectory = "spamassassin";
       };
-
-      # 0 and 1 no error, exitcode > 1 means error:
-      # https://spamassassin.apache.org/full/3.1.x/doc/sa-update.html#exit_codes
-      preStart = ''
-        echo "Recreating '/var/lib/spamasassin' with creating '3.004001' (or similar) and 'sa-update-keys'"
-        mkdir -p /var/lib/spamassassin
-        chown spamd:spamd /var/lib/spamassassin -R
-        set +e
-        ${pkgs.su}/bin/su -s "${pkgs.bash}/bin/bash" -c "${pkgs.spamassassin}/bin/sa-update --gpghomedir=/var/lib/spamassassin/sa-update-keys/" spamd
-        v=$?
-        set -e
-        if [ $v -gt 1 ]; then
-          echo "sa-update execution error"
-          exit $v
-        fi
-        chown spamd:spamd /var/lib/spamassassin -R
-      '';
     };
   };
 }
diff --git a/nixpkgs/nixos/modules/services/misc/bazarr.nix b/nixpkgs/nixos/modules/services/misc/bazarr.nix
index d3fd5b08cc84..99343a146a7a 100644
--- a/nixpkgs/nixos/modules/services/misc/bazarr.nix
+++ b/nixpkgs/nixos/modules/services/misc/bazarr.nix
@@ -64,6 +64,7 @@ in
 
     users.users = mkIf (cfg.user == "bazarr") {
       bazarr = {
+        isSystemUser = true;
         group = cfg.group;
         home = "/var/lib/${config.systemd.services.bazarr.serviceConfig.StateDirectory}";
       };
diff --git a/nixpkgs/nixos/modules/services/misc/etebase-server.nix b/nixpkgs/nixos/modules/services/misc/etebase-server.nix
index 31e0952b5b98..b6bd6e9fd37b 100644
--- a/nixpkgs/nixos/modules/services/misc/etebase-server.nix
+++ b/nixpkgs/nixos/modules/services/misc/etebase-server.nix
@@ -192,8 +192,8 @@ in
         # Auto-migrate on first run or if the package has changed
         versionFile="${cfg.dataDir}/src-version"
         if [[ $(cat "$versionFile" 2>/dev/null) != ${pkgs.etebase-server} ]]; then
-          ${pythonEnv}/bin/etebase-server migrate
-          ${pythonEnv}/bin/etebase-server collectstatic
+          ${pythonEnv}/bin/etebase-server migrate --no-input
+          ${pythonEnv}/bin/etebase-server collectstatic --no-input --clear
           echo ${pkgs.etebase-server} > "$versionFile"
         fi
       '';
@@ -211,6 +211,7 @@ in
 
     users = optionalAttrs (cfg.user == defaultUser) {
       users.${defaultUser} = {
+        isSystemUser = true;
         group = defaultUser;
         home = cfg.dataDir;
       };
diff --git a/nixpkgs/nixos/modules/services/misc/home-assistant.nix b/nixpkgs/nixos/modules/services/misc/home-assistant.nix
index 2787c975b352..0590f54ae60e 100644
--- a/nixpkgs/nixos/modules/services/misc/home-assistant.nix
+++ b/nixpkgs/nixos/modules/services/misc/home-assistant.nix
@@ -50,15 +50,10 @@ let
   # List of components used in config
   extraComponents = filter useComponent availableComponents;
 
-  testedPackage = if (cfg.autoExtraComponents && cfg.config != null)
+  package = if (cfg.autoExtraComponents && cfg.config != null)
     then (cfg.package.override { inherit extraComponents; })
     else cfg.package;
 
-  # overridePythonAttrs has to be applied after override
-  package = testedPackage.overridePythonAttrs (oldAttrs: {
-    doCheck = false;
-  });
-
   # If you are changing this, please update the description in applyDefaultConfig
   defaultConfig = {
     homeassistant.time_zone = config.time.timeZone;
@@ -188,9 +183,13 @@ in {
     };
 
     package = mkOption {
-      default = pkgs.home-assistant;
+      default = pkgs.home-assistant.overrideAttrs (oldAttrs: {
+        doInstallCheck = false;
+      });
       defaultText = literalExample ''
-        pkgs.home-assistant
+        pkgs.home-assistant.overrideAttrs (oldAttrs: {
+          doInstallCheck = false;
+        })
       '';
       type = types.package;
       example = literalExample ''
@@ -199,12 +198,11 @@ in {
         }
       '';
       description = ''
-        Home Assistant package to use. Tests are automatically disabled, as they take a considerable amout of time to complete.
+        Home Assistant package to use. By default the tests are disabled, as they take a considerable amount of time to complete.
         Override <literal>extraPackages</literal> or <literal>extraComponents</literal> in order to add additional dependencies.
         If you specify <option>config</option> and do not set <option>autoExtraComponents</option>
         to <literal>false</literal>, overriding <literal>extraComponents</literal> will have no effect.
-        Avoid <literal>home-assistant.overridePythonAttrs</literal> if you use
-        <literal>autoExtraComponents</literal>.
+        Avoid <literal>home-assistant.overridePythonAttrs</literal> if you use <literal>autoExtraComponents</literal>.
       '';
     };
 
diff --git a/nixpkgs/nixos/modules/services/misc/jellyfin.nix b/nixpkgs/nixos/modules/services/misc/jellyfin.nix
index 6a47dc3628f4..c1b45864041b 100644
--- a/nixpkgs/nixos/modules/services/misc/jellyfin.nix
+++ b/nixpkgs/nixos/modules/services/misc/jellyfin.nix
@@ -18,6 +18,7 @@ in
 
       package = mkOption {
         type = types.package;
+        default = pkgs.jellyfin;
         example = literalExample "pkgs.jellyfin";
         description = ''
           Jellyfin package to use.
@@ -29,6 +30,16 @@ in
         default = "jellyfin";
         description = "Group under which jellyfin runs.";
       };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Open the default ports in the firewall for the media server. The
+          HTTP/HTTPS ports can be changed in the Web UI, so this option should
+          only be used if they are unchanged.
+        '';
+      };
     };
   };
 
@@ -88,11 +99,6 @@ in
       };
     };
 
-    services.jellyfin.package = mkDefault (
-      if versionAtLeast config.system.stateVersion "20.09" then pkgs.jellyfin
-        else pkgs.jellyfin_10_5
-    );
-
     users.users = mkIf (cfg.user == "jellyfin") {
       jellyfin = {
         group = cfg.group;
@@ -104,6 +110,12 @@ in
       jellyfin = {};
     };
 
+    networking.firewall = mkIf cfg.openFirewall {
+      # from https://jellyfin.org/docs/general/networking/index.html
+      allowedTCPPorts = [ 8096 8920 ];
+      allowedUDPPorts = [ 1900 7359 ];
+    };
+
   };
 
   meta.maintainers = with lib.maintainers; [ minijackson ];
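
A sketch of the new firewall option; as the description above notes, this is only sensible while the default ports are kept:

  {
    services.jellyfin = {
      enable = true;
      openFirewall = true;   # TCP 8096/8920 and UDP 1900/7359
    };
  }
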
diff --git a/nixpkgs/nixos/modules/services/misc/matrix-appservice-irc.nix b/nixpkgs/nixos/modules/services/misc/matrix-appservice-irc.nix
index 63dc313ad10b..a0a5973d30f2 100644
--- a/nixpkgs/nixos/modules/services/misc/matrix-appservice-irc.nix
+++ b/nixpkgs/nixos/modules/services/misc/matrix-appservice-irc.nix
@@ -214,7 +214,8 @@ in {
         PrivateMounts = true;
         SystemCallFilter = "~@aio @clock @cpu-emulation @debug @keyring @memlock @module @mount @obsolete @raw-io @setuid @swap";
         SystemCallArchitectures = "native";
-        RestrictAddressFamilies = "AF_INET AF_INET6";
+        # AF_UNIX is required to connect to a postgres socket.
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
       };
     };
 
diff --git a/nixpkgs/nixos/modules/services/misc/matrix-synapse.xml b/nixpkgs/nixos/modules/services/misc/matrix-synapse.xml
index 358b631eb485..41a56df0f2b5 100644
--- a/nixpkgs/nixos/modules/services/misc/matrix-synapse.xml
+++ b/nixpkgs/nixos/modules/services/misc/matrix-synapse.xml
@@ -33,11 +33,11 @@
    <link xlink:href="https://github.com/matrix-org/synapse#synapse-installation">
    installation instructions of Synapse </link>.
 <programlisting>
-{ pkgs, ... }:
+{ pkgs, lib, ... }:
 let
   fqdn =
     let
-      join = hostName: domain: hostName + optionalString (domain != null) ".${domain}";
+      join = hostName: domain: hostName + lib.optionalString (domain != null) ".${domain}";
     in join config.networking.hostName config.networking.domain;
 in {
   networking = {
@@ -132,7 +132,7 @@ in {
       }
     ];
   };
-};
+}
 </programlisting>
   </para>
 
diff --git a/nixpkgs/nixos/modules/services/misc/nix-daemon.nix b/nixpkgs/nixos/modules/services/misc/nix-daemon.nix
index 64bdbf159d51..133e96da0ec8 100644
--- a/nixpkgs/nixos/modules/services/misc/nix-daemon.nix
+++ b/nixpkgs/nixos/modules/services/misc/nix-daemon.nix
@@ -21,6 +21,7 @@ let
          calls in `libstore/build.cc', don't add any supplementary group
          here except "nixbld".  */
       uid = builtins.add config.ids.uids.nixbld nr;
+      isSystemUser = true;
       group = "nixbld";
       extraGroups = [ "nixbld" ];
     };
diff --git a/nixpkgs/nixos/modules/services/misc/podgrab.nix b/nixpkgs/nixos/modules/services/misc/podgrab.nix
new file mode 100644
index 000000000000..7077408b7942
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/podgrab.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.podgrab;
+in
+{
+  options.services.podgrab = with lib; {
+    enable = mkEnableOption "Podgrab, a self-hosted podcast manager";
+
+    passwordFile = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "/run/secrets/password.env";
+      description = ''
+        The path to a file containing the PASSWORD environment variable
+        definition for Podgrab's authentication.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      example = 4242;
+      description = "The port on which Podgrab will listen for incoming HTTP traffic.";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.podgrab = {
+      description = "Podgrab podcast manager";
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        CONFIG = "/var/lib/podgrab/config";
+        DATA = "/var/lib/podgrab/data";
+        GIN_MODE = "release";
+        PORT = toString cfg.port;
+      };
+      serviceConfig = {
+        DynamicUser = true;
+        EnvironmentFile = lib.optional (cfg.passwordFile != null) [
+          cfg.passwordFile
+        ];
+        ExecStart = "${pkgs.podgrab}/bin/podgrab";
+        WorkingDirectory = "${pkgs.podgrab}/share";
+        StateDirectory = [ "podgrab/config" "podgrab/data" ];
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ ambroisie ];
+}
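
A usage sketch for the new module (the secret path is a placeholder):

  {
    services.podgrab = {
      enable = true;
      port = 4242;
      # A file defining PASSWORD=..., loaded via EnvironmentFile to enable authentication.
      passwordFile = "/run/secrets/podgrab-password.env";
    };
  }
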
diff --git a/nixpkgs/nixos/modules/services/monitoring/nagios.nix b/nixpkgs/nixos/modules/services/monitoring/nagios.nix
index 9ac6869068f2..61214508a9c6 100644
--- a/nixpkgs/nixos/modules/services/monitoring/nagios.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/nagios.nix
@@ -192,6 +192,7 @@ in
       path     = [ pkgs.nagios ] ++ cfg.plugins;
       wantedBy = [ "multi-user.target" ];
       after    = [ "network.target" ];
+      restartTriggers = [ nagiosCfgFile ];
 
       serviceConfig = {
         User = "nagios";
@@ -201,7 +202,6 @@ in
         LogsDirectory = "nagios";
         StateDirectory = "nagios";
         ExecStart = "${pkgs.nagios}/bin/nagios /etc/nagios.cfg";
-        X-ReloadIfChanged = nagiosCfgFile;
       };
     };
 
diff --git a/nixpkgs/nixos/modules/services/monitoring/tuptime.nix b/nixpkgs/nixos/modules/services/monitoring/tuptime.nix
index 8f79d9165990..17c5c1f56eaf 100644
--- a/nixpkgs/nixos/modules/services/monitoring/tuptime.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/tuptime.nix
@@ -34,7 +34,10 @@ in {
 
     users = {
       groups._tuptime.members = [ "_tuptime" ];
-      users._tuptime.description = "tuptime database owner";
+      users._tuptime = {
+        isSystemUser = true;
+        description = "tuptime database owner";
+      };
     };
 
     systemd = {
diff --git a/nixpkgs/nixos/modules/services/monitoring/vnstat.nix b/nixpkgs/nixos/modules/services/monitoring/vnstat.nix
index e9bedb704a43..5e19c399568d 100644
--- a/nixpkgs/nixos/modules/services/monitoring/vnstat.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/vnstat.nix
@@ -6,21 +6,21 @@ let
   cfg = config.services.vnstat;
 in {
   options.services.vnstat = {
-    enable = mkOption {
-      type = types.bool;
-      default = false;
-      description = ''
-        Whether to enable update of network usage statistics via vnstatd.
-      '';
-    };
+    enable = mkEnableOption "update of network usage statistics via vnstatd";
   };
 
   config = mkIf cfg.enable {
-    users.users.vnstatd = {
-      isSystemUser = true;
-      description = "vnstat daemon user";
-      home = "/var/lib/vnstat";
-      createHome = true;
+
+    environment.systemPackages = [ pkgs.vnstat ];
+
+    users = {
+      groups.vnstatd = {};
+
+      users.vnstatd = {
+        isSystemUser = true;
+        group = "vnstatd";
+        description = "vnstat daemon user";
+      };
     };
 
     systemd.services.vnstat = {
@@ -33,7 +33,6 @@ in {
         "man:vnstat(1)"
         "man:vnstat.conf(5)"
       ];
-      preStart = "chmod 755 /var/lib/vnstat";
       serviceConfig = {
         ExecStart = "${pkgs.vnstat}/bin/vnstatd -n";
         ExecReload = "${pkgs.procps}/bin/kill -HUP $MAINPID";
@@ -52,7 +51,10 @@ in {
         RestrictNamespaces = true;
 
         User = "vnstatd";
+        Group = "vnstatd";
       };
     };
   };
+
+  meta.maintainers = [ maintainers.evils ];
 }
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/ipfs.nix b/nixpkgs/nixos/modules/services/network-filesystems/ipfs.nix
index 2082d513161e..6d8dfcce933c 100644
--- a/nixpkgs/nixos/modules/services/network-filesystems/ipfs.nix
+++ b/nixpkgs/nixos/modules/services/network-filesystems/ipfs.nix
@@ -216,14 +216,11 @@ in {
 
     systemd.packages = [ cfg.package ];
 
-    systemd.services.ipfs-init = {
-      description = "IPFS Initializer";
-
+    systemd.services.ipfs = {
+      path = [ "/run/wrappers" cfg.package ];
       environment.IPFS_PATH = cfg.dataDir;
 
-      path = [ cfg.package ];
-
-      script = ''
+      preStart = ''
         if [[ ! -f ${cfg.dataDir}/config ]]; then
           ipfs init ${optionalString cfg.emptyRepo "-e"} \
             ${optionalString (! cfg.localDiscovery) "--profile=server"}
@@ -233,26 +230,7 @@ in {
             else "ipfs config profile apply server"
           }
         fi
-      '';
-
-      wantedBy = [ "default.target" ];
-
-      serviceConfig = {
-        Type = "oneshot";
-        RemainAfterExit = true;
-        User = cfg.user;
-        Group = cfg.group;
-      };
-    };
-
-    systemd.services.ipfs = {
-      path = [ "/run/wrappers" cfg.package ];
-      environment.IPFS_PATH = cfg.dataDir;
-
-      wants = [ "ipfs-init.service" ];
-      after = [ "ipfs-init.service" ];
-
-      preStart = optionalString cfg.autoMount ''
+      '' + optionalString cfg.autoMount ''
         ipfs --local config Mounts.FuseAllowOther --json true
         ipfs --local config Mounts.IPFS ${cfg.ipfsMountDir}
         ipfs --local config Mounts.IPNS ${cfg.ipnsMountDir}
@@ -296,7 +274,7 @@ in {
 
     systemd.sockets.ipfs-api = {
       wantedBy = [ "sockets.target" ];
-      # We also include "%t/ipfs.sock" because tere is no way to put the "%t"
+      # We also include "%t/ipfs.sock" because there is no way to put the "%t"
       # in the multiaddr.
       socketConfig.ListenStream = let
           fromCfg = multiaddrToListenStream cfg.apiAddress;
diff --git a/nixpkgs/nixos/modules/services/networking/babeld.nix b/nixpkgs/nixos/modules/services/networking/babeld.nix
index 272c58ecd7ff..97dca002a007 100644
--- a/nixpkgs/nixos/modules/services/networking/babeld.nix
+++ b/nixpkgs/nixos/modules/services/networking/babeld.nix
@@ -19,7 +19,10 @@ let
     "interface ${name} ${paramsString interface}\n";
 
   configFile = with cfg; pkgs.writeText "babeld.conf" (
-    (optionalString (cfg.interfaceDefaults != null) ''
+    ''
+      skip-kernel-setup true
+    ''
+    + (optionalString (cfg.interfaceDefaults != null) ''
       default ${paramsString cfg.interfaceDefaults}
     '')
     + (concatMapStrings interfaceConfig (attrNames cfg.interfaces))
@@ -84,13 +87,22 @@ in
 
   config = mkIf config.services.babeld.enable {
 
+    boot.kernel.sysctl = {
+      "net.ipv6.conf.all.forwarding" = 1;
+      "net.ipv6.conf.all.accept_redirects" = 0;
+      "net.ipv4.conf.all.forwarding" = 1;
+      "net.ipv4.conf.all.rp_filter" = 0;
+    } // lib.mapAttrs' (ifname: _: lib.nameValuePair "net.ipv4.conf.${ifname}.rp_filter" (lib.mkDefault 0)) config.services.babeld.interfaces;
+
     systemd.services.babeld = {
       description = "Babel routing daemon";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         ExecStart = "${pkgs.babeld}/bin/babeld -c ${configFile} -I /run/babeld/babeld.pid -S /var/lib/babeld/state";
+        AmbientCapabilities = [ "CAP_NET_ADMIN" ];
         CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];
+        DynamicUser = true;
         IPAddressAllow = [ "fe80::/64" "ff00::/8" "::1/128" "127.0.0.0/8" ];
         IPAddressDeny = "any";
         LockPersonality = true;
@@ -98,11 +110,11 @@ in
         MemoryDenyWriteExecute = true;
         ProtectSystem = "strict";
         ProtectClock = true;
-        ProtectKernelTunables = false; # Couldn't write sysctl: Read-only file system
+        ProtectKernelTunables = true;
         ProtectKernelModules = true;
         ProtectKernelLogs = true;
         ProtectControlGroups = true;
-        RestrictAddressFamilies = [ "AF_NETLINK" "AF_INET6" ];
+        RestrictAddressFamilies = [ "AF_NETLINK" "AF_INET6" "AF_INET" ];
         RestrictNamespaces = true;
         RestrictRealtime = true;
         RestrictSUIDSGID = true;
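
For reference, a sketch of a configuration this hardened unit would run; the interface name and parameters are illustrative babeld settings, not options introduced by this commit:

  {
    services.babeld = {
      enable = true;
      # Parameters are rendered verbatim into babeld.conf; IPv4/IPv6 forwarding
      # and rp_filter sysctls are now set by the module, and the daemon runs as
      # a DynamicUser with CAP_NET_ADMIN instead of root.
      interfaces.enp1s0 = {
        type = "wired";
        hello-interval = 5;
      };
    };
  }
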
diff --git a/nixpkgs/nixos/modules/services/networking/bird.nix b/nixpkgs/nixos/modules/services/networking/bird.nix
index 6d7e7760d94e..1923afdf83f2 100644
--- a/nixpkgs/nixos/modules/services/networking/bird.nix
+++ b/nixpkgs/nixos/modules/services/networking/bird.nix
@@ -73,6 +73,7 @@ let
           users.${variant} = {
             description = "BIRD Internet Routing Daemon user";
             group = variant;
+            isSystemUser = true;
           };
           groups.${variant} = {};
         };
diff --git a/nixpkgs/nixos/modules/services/networking/dnsdist.nix b/nixpkgs/nixos/modules/services/networking/dnsdist.nix
index 3584915d0aa3..c7c6a79864cb 100644
--- a/nixpkgs/nixos/modules/services/networking/dnsdist.nix
+++ b/nixpkgs/nixos/modules/services/networking/dnsdist.nix
@@ -4,7 +4,7 @@ with lib;
 
 let
   cfg = config.services.dnsdist;
-  configFile = pkgs.writeText "dndist.conf" ''
+  configFile = pkgs.writeText "dnsdist.conf" ''
     setLocal('${cfg.listenAddress}:${toString cfg.listenPort}')
     ${cfg.extraConfig}
   '';
diff --git a/nixpkgs/nixos/modules/services/networking/doh-proxy-rust.nix b/nixpkgs/nixos/modules/services/networking/doh-proxy-rust.nix
new file mode 100644
index 000000000000..0e55bc386653
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/doh-proxy-rust.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.doh-proxy-rust;
+
+in {
+
+  options.services.doh-proxy-rust = {
+
+    enable = mkEnableOption "doh-proxy-rust";
+
+    flags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = literalExample [ "--server-address=9.9.9.9:53" ];
+      description = ''
+        A list of command-line flags to pass to doh-proxy. For details on the
+        available options, see <link xlink:href="https://github.com/jedisct1/doh-server#usage"/>.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.doh-proxy-rust = {
+      description = "doh-proxy-rust";
+      after = [ "network.target" "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.doh-proxy-rust}/bin/doh-proxy ${escapeShellArgs cfg.flags}";
+        Restart = "always";
+        RestartSec = 10;
+        DynamicUser = true;
+
+        CapabilityBoundingSet = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        ProtectClock = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        RemoveIPC = true;
+        RestrictAddressFamilies = "AF_INET AF_INET6";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [ "@system-service" "~@privileged @resources" ];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ stephank ];
+
+}
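A minimal sketch of enabling the new doh-proxy-rust module; the upstream resolver and listen address are placeholders, and the flag names should be checked against the doh-server usage page linked in the option description:

    services.doh-proxy-rust = {
      enable = true;
      flags = [
        "--server-address=9.9.9.9:53"      # upstream DNS resolver (placeholder)
        "--listen-address=127.0.0.1:3000"  # local DoH listener (assumed flag, see upstream docs)
      ];
    };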
diff --git a/nixpkgs/nixos/modules/services/networking/gobgpd.nix b/nixpkgs/nixos/modules/services/networking/gobgpd.nix
new file mode 100644
index 000000000000..d3b03471f4eb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/gobgpd.nix
@@ -0,0 +1,64 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gobgpd;
+  format = pkgs.formats.toml { };
+  confFile = format.generate "gobgpd.conf" cfg.settings;
+in {
+  options.services.gobgpd = {
+    enable = mkEnableOption "GoBGP Routing Daemon";
+
+    settings = mkOption {
+      type = format.type;
+      default = { };
+      description = ''
+        GoBGP configuration. Refer to
+        <link xlink:href="https://github.com/osrg/gobgp#documentation"/>
+        for details on supported values.
+      '';
+      example = literalExample ''
+        {
+          global = {
+            config = {
+              as = 64512;
+              router-id = "192.168.255.1";
+            };
+          };
+          neighbors = [
+            {
+              config = {
+                neighbor-address = "10.0.255.1";
+                peer-as = 65001;
+              };
+            }
+            {
+              config = {
+                neighbor-address = "10.0.255.2";
+                peer-as = 65002;
+              };
+            }
+          ];
+        }
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.gobgpd ];
+    systemd.services.gobgpd = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      description = "GoBGP Routing Daemon";
+      serviceConfig = {
+        Type = "notify";
+        ExecStartPre = "${pkgs.gobgpd}/bin/gobgpd -f ${confFile} -d";
+        ExecStart = "${pkgs.gobgpd}/bin/gobgpd -f ${confFile} --sdnotify";
+        ExecReload = "${pkgs.gobgpd}/bin/gobgpd -r";
+        DynamicUser = true;
+        AmbientCapabilities = "cap_net_bind_service";
+      };
+    };
+  };
+}
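A usage sketch mirroring the literalExample above; the ASN, router ID and neighbor address are placeholders:

    services.gobgpd = {
      enable = true;
      settings = {
        global.config = {
          as = 64512;                   # private ASN (placeholder)
          router-id = "192.168.255.1";
        };
        neighbors = [
          { config = { neighbor-address = "10.0.255.1"; peer-as = 65001; }; }
        ];
      };
    };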
diff --git a/nixpkgs/nixos/modules/services/networking/iscsi/initiator.nix b/nixpkgs/nixos/modules/services/networking/iscsi/initiator.nix
new file mode 100644
index 000000000000..cbc919a2f76c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/iscsi/initiator.nix
@@ -0,0 +1,84 @@
+{ config, lib, pkgs, ... }: with lib;
+let
+  cfg = config.services.openiscsi;
+in
+{
+  options.services.openiscsi = with types; {
+    enable = mkEnableOption "the openiscsi iscsi daemon";
+    enableAutoLoginOut = mkEnableOption ''
+      automatic login and logout of all automatic targets.
+      You probably do not want this.
+    '';
+    discoverPortal = mkOption {
+      type = nullOr str;
+      default = null;
+      description = "Portal to discover targets on";
+    };
+    name = mkOption {
+      type = str;
+      description = "Name of this iscsi initiator";
+      example = "iqn.2020-08.org.linux-iscsi.initiatorhost:example";
+    };
+    package = mkOption {
+      type = package;
+      description = "openiscsi package to use";
+      default = pkgs.openiscsi;
+      defaultText = "pkgs.openiscsi";
+    };
+
+    extraConfig = mkOption {
+      type = str;
+      default = "";
+      description = "Lines to append to default iscsid.conf";
+    };
+
+    extraConfigFile = mkOption {
+      description = ''
+        Append an additional file's contents to /etc/iscsid.conf. Use a non-store path
+        and store passwords in this file.
+      '';
+      default = null;
+      type = nullOr str;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.etc."iscsi/iscsid.conf.fragment".source = pkgs.runCommand "iscsid.conf" {} ''
+      cat "${cfg.package}/etc/iscsi/iscsid.conf" > $out
+      cat << 'EOF' >> $out
+      ${cfg.extraConfig}
+      ${optionalString cfg.enableAutoLoginOut "node.startup = automatic"}
+      EOF
+    '';
+    environment.etc."iscsi/initiatorname.iscsi".text = "InitiatorName=${cfg.name}";
+
+    system.activationScripts.iscsid = let
+      extraCfgDumper = optionalString (cfg.extraConfigFile != null) ''
+        if [ -f "${cfg.extraConfigFile}" ]; then
+          printf "\n# The following is from ${cfg.extraConfigFile}:\n"
+          cat "${cfg.extraConfigFile}"
+        else
+          echo "Warning: services.openiscsi.extraConfigFile ${cfg.extraConfigFile} does not exist!" >&2
+        fi
+      '';
+    in ''
+      (
+        cat ${config.environment.etc."iscsi/iscsid.conf.fragment".source}
+        ${extraCfgDumper}
+      ) > /etc/iscsi/iscsid.conf
+    '';
+
+    systemd.packages = [ cfg.package ];
+
+    systemd.services."iscsid".wantedBy = [ "multi-user.target" ];
+    systemd.sockets."iscsid".wantedBy = [ "sockets.target" ];
+
+    systemd.services."iscsi" = mkIf cfg.enableAutoLoginOut {
+      wantedBy = [ "remote-fs.target" ];
+      serviceConfig.ExecStartPre = mkIf (cfg.discoverPortal != null) "${cfg.package}/bin/iscsiadm --mode discoverydb --type sendtargets --portal ${escapeShellArg cfg.discoverPortal} --discover";
+    };
+
+    environment.systemPackages = [ cfg.package ];
+    boot.kernelModules = [ "iscsi_tcp" ];
+  };
+}
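A sketch of a typical initiator setup with the options defined above; the portal address, IQN and secrets path are assumptions:

    services.openiscsi = {
      enable = true;
      name = "iqn.2020-08.org.linux-iscsi.initiatorhost:example";
      discoverPortal = "192.168.1.100:3260";   # hypothetical portal
      enableAutoLoginOut = true;               # log in to discovered targets on boot
      # Kept outside the Nix store; merged into /etc/iscsi/iscsid.conf by the
      # activation script above (e.g. for CHAP credentials).
      extraConfigFile = "/etc/iscsi/secrets.conf";
    };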
diff --git a/nixpkgs/nixos/modules/services/networking/iscsi/root-initiator.nix b/nixpkgs/nixos/modules/services/networking/iscsi/root-initiator.nix
new file mode 100644
index 000000000000..3274878c4fae
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/iscsi/root-initiator.nix
@@ -0,0 +1,181 @@
+{ config, lib, pkgs, ... }: with lib;
+let
+  cfg = config.boot.iscsi-initiator;
+in
+{
+  # If you're booting entirely off another machine you may want to add
+  # this snippet to always boot the latest "system" version. It is not
+  # enabled by default in case you have an initrd on a local disk:
+  #
+  #     boot.initrd.postMountCommands = ''
+  #       ln -sfn /nix/var/nix/profiles/system/init /mnt-root/init
+  #       stage2Init=/init
+  #     '';
+  #
+  # Note: Theoretically you might want to connect to multiple portals and
+  # log in to multiple targets, however the authors of this module so far
+  # don't have the need or expertise to reasonably implement it. Also,
+  # consider carefully before making your boot chain depend on multiple
+  # machines to be up.
+  options.boot.iscsi-initiator = with types; {
+    name = mkOption {
+      description = ''
+        Name of the iSCSI initiator to boot from. Note that booting from iSCSI
+        requires networkd-based networking.
+      '';
+      default = null;
+      example = "iqn.2020-08.org.linux-iscsi.initiatorhost:example";
+      type = nullOr str;
+    };
+
+    discoverPortal = mkOption {
+      description = ''
+        iSCSI portal to boot from.
+      '';
+      default = null;
+      example = "192.168.1.1:3260";
+      type = nullOr str;
+    };
+
+    target = mkOption {
+      description = ''
+        Name of the iSCSI target to boot from.
+      '';
+      default = null;
+      example = "iqn.2020-08.org.linux-iscsi.targethost:example";
+      type = nullOr str;
+    };
+
+    logLevel = mkOption {
+      description = ''
+        Higher numbers elicit more logs.
+      '';
+      default = 1;
+      example = 8;
+      type = int;
+    };
+
+    loginAll = mkOption {
+      description = ''
+        Do not log in to a specific target on the portal, but to every target discovered there.
+        This overrides the target option.
+      '';
+      type = bool;
+      default = false;
+    };
+
+    extraConfig = mkOption {
+      description = "Extra lines to append to /etc/iscsid.conf";
+      default = null;
+      type = nullOr lines;
+    };
+
+    extraConfigFile = mkOption {
+      description = ''
+        Append an additional file's contents to `/etc/iscsid.conf`. Use a non-store path
+        and store passwords in this file. Note: the file specified here must be available
+        in the initrd, see: `boot.initrd.secrets`.
+      '';
+      default = null;
+      type = nullOr str;
+    };
+  };
+
+  config = mkIf (cfg.name != null) {
+    # The "scripted" networking configuration (ie: non-networkd)
+    # doesn't properly order the start and stop of the interfaces, and the
+    # network interfaces are torn down before unmounting disks. Since this
+    # module is specifically for very-early-boot network mounts, we need
+    # the network to stay on.
+    #
+    # We could probably fix the scripted options to properly order, but I'm
+    # not inclined to invest that time today. Hopefully this gets users far
+    # enough along and they can just use networkd.
+    networking.useNetworkd = true;
+    networking.useDHCP = false; # Required to set useNetworkd = true
+
+    boot.initrd = {
+      network.enable = true;
+
+      # By default, the stage-1 disables the network and resets the interfaces
+      # on startup. Since our startup disks are on the network, we can't let
+      # the network not work.
+      network.flushBeforeStage2 = false;
+
+      kernelModules = [ "iscsi_tcp" ];
+
+      extraUtilsCommands = ''
+        copy_bin_and_libs ${pkgs.openiscsi}/bin/iscsid
+        copy_bin_and_libs ${pkgs.openiscsi}/bin/iscsiadm
+        ${optionalString (!config.boot.initrd.network.ssh.enable) "cp -pv ${pkgs.glibc.out}/lib/libnss_files.so.* $out/lib"}
+
+        mkdir -p $out/etc/iscsi
+        cp ${config.environment.etc.hosts.source} $out/etc/hosts
+        cp ${pkgs.openiscsi}/etc/iscsi/iscsid.conf $out/etc/iscsi/iscsid.fragment.conf
+        chmod +w $out/etc/iscsi/iscsid.fragment.conf
+        cat << 'EOF' >> $out/etc/iscsi/iscsid.fragment.conf
+        ${optionalString (cfg.extraConfig != null) cfg.extraConfig}
+        EOF
+      '';
+
+      extraUtilsCommandsTest = ''
+        $out/bin/iscsiadm --version
+      '';
+
+      preLVMCommands = let
+        extraCfgDumper = optionalString (cfg.extraConfigFile != null) ''
+          if [ -f "${cfg.extraConfigFile}" ]; then
+            printf "\n# The following is from ${cfg.extraConfigFile}:\n"
+            cat "${cfg.extraConfigFile}"
+          else
+            echo "Warning: boot.iscsi-initiator.extraConfigFile ${cfg.extraConfigFile} does not exist!" >&2
+          fi
+        '';
+      in ''
+        ${optionalString (!config.boot.initrd.network.ssh.enable) ''
+        # stolen from initrd-ssh.nix
+        echo 'root:x:0:0:root:/root:/bin/ash' > /etc/passwd
+        echo 'passwd: files' > /etc/nsswitch.conf
+      ''}
+
+        cp -f $extraUtils/etc/hosts /etc/hosts
+
+        mkdir -p /etc/iscsi /run/lock/iscsi
+        echo "InitiatorName=${cfg.name}" > /etc/iscsi/initiatorname.iscsi
+
+        (
+          cat "$extraUtils/etc/iscsi/iscsid.fragment.conf"
+          printf "\n"
+          ${optionalString cfg.loginAll ''echo "node.startup = automatic"''}
+          ${extraCfgDumper}
+        ) > /etc/iscsi/iscsid.conf
+
+        iscsid --foreground --no-pid-file --debug ${toString cfg.logLevel} &
+        iscsiadm --mode discoverydb \
+          --type sendtargets \
+          --discover \
+          --portal ${escapeShellArg cfg.discoverPortal} \
+          --debug ${toString cfg.logLevel}
+
+        ${if cfg.loginAll then ''
+        iscsiadm --mode node --loginall all
+      '' else ''
+        iscsiadm --mode node --targetname ${escapeShellArg cfg.target} --login
+      ''}
+        pkill -9 iscsid
+      '';
+    };
+
+    services.openiscsi = {
+      enable = true;
+      inherit (cfg) name;
+    };
+
+    assertions = [
+      {
+        assertion = cfg.loginAll -> cfg.target == null;
+        message = "iSCSI target name is set while login on all portals is enabled.";
+      }
+    ];
+  };
+}
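A sketch of booting from a single target with this module; the addresses and IQNs are placeholders. Note that setting name switches the machine to networkd, as explained in the config section above:

    boot.iscsi-initiator = {
      name = "iqn.2020-08.org.linux-iscsi.initiatorhost:example";
      discoverPortal = "192.168.1.1:3260";
      target = "iqn.2020-08.org.linux-iscsi.targethost:example";
      # Must also be shipped into the initrd, e.g. via boot.initrd.secrets.
      extraConfigFile = "/etc/iscsi-root-secrets.conf";
    };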
diff --git a/nixpkgs/nixos/modules/services/networking/iscsi/target.nix b/nixpkgs/nixos/modules/services/networking/iscsi/target.nix
new file mode 100644
index 000000000000..8a10e7d346ae
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/iscsi/target.nix
@@ -0,0 +1,53 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.target;
+in
+{
+  ###### interface
+  options = {
+    services.target = with types; {
+      enable = mkEnableOption "the kernel's LIO iscsi target";
+
+      config = mkOption {
+        type = attrs;
+        default = {};
+        description = ''
+          Content of /etc/target/saveconfig.json.
+          This file is normally read and written by targetcli.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    environment.etc."target/saveconfig.json" = {
+      text = builtins.toJSON cfg.config;
+      mode = "0600";
+    };
+
+    environment.systemPackages = with pkgs; [ targetcli ];
+
+    boot.kernelModules = [ "configfs" "target_core_mod" "iscsi_target_mod" ];
+
+    systemd.services.iscsi-target = {
+      enable = true;
+      after = [ "network.target" "local-fs.target" ];
+      requires = [ "sys-kernel-config.mount" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.python3.pkgs.rtslib}/bin/targetctl restore";
+        ExecStop = "${pkgs.python3.pkgs.rtslib}/bin/targetctl clear";
+        RemainAfterExit = "yes";
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d /etc/target 0700 root root - -"
+    ];
+  };
+}
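A sketch of how the target module is meant to be fed; the usual workflow is to configure the target interactively with targetcli once and then import the resulting saveconfig.json:

    services.target = {
      enable = true;
      # Hypothetical: import a saveconfig.json previously written by targetcli.
      config = builtins.fromJSON (builtins.readFile ./saveconfig.json);
    };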
diff --git a/nixpkgs/nixos/modules/services/networking/mxisd.nix b/nixpkgs/nixos/modules/services/networking/mxisd.nix
index 482d6ff456b1..f29d190c6262 100644
--- a/nixpkgs/nixos/modules/services/networking/mxisd.nix
+++ b/nixpkgs/nixos/modules/services/networking/mxisd.nix
@@ -41,8 +41,8 @@ in {
 
       package = mkOption {
         type = types.package;
-        default = pkgs.mxisd;
-        defaultText = "pkgs.mxisd";
+        default = pkgs.ma1sd;
+        defaultText = "pkgs.ma1sd";
         description = "The mxisd/ma1sd package to use";
       };
 
diff --git a/nixpkgs/nixos/modules/services/networking/ncdns.nix b/nixpkgs/nixos/modules/services/networking/ncdns.nix
index c1832ad17520..d30fe0f6f6d1 100644
--- a/nixpkgs/nixos/modules/services/networking/ncdns.nix
+++ b/nixpkgs/nixos/modules/services/networking/ncdns.nix
@@ -243,8 +243,10 @@ in
         xlog.journal = true;
     };
 
-    users.users.ncdns =
-      { description = "ncdns daemon user"; };
+    users.users.ncdns = {
+      isSystemUser = true;
+      description = "ncdns daemon user";
+    };
 
     systemd.services.ncdns = {
       description = "ncdns daemon";
diff --git a/nixpkgs/nixos/modules/services/networking/nebula.nix b/nixpkgs/nixos/modules/services/networking/nebula.nix
new file mode 100644
index 000000000000..e7ebfe1b4db7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nebula.nix
@@ -0,0 +1,219 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.nebula;
+  enabledNetworks = filterAttrs (n: v: v.enable) cfg.networks;
+
+  format = pkgs.formats.yaml {};
+
+  nameToId = netName: "nebula-${netName}";
+in
+{
+  # Interface
+
+  options = {
+    services.nebula = {
+      networks = mkOption {
+        description = "Nebula network definitions.";
+        default = {};
+        type = types.attrsOf (types.submodule {
+          options = {
+            enable = mkOption {
+              type = types.bool;
+              default = true;
+              description = "Enable or disable this network.";
+            };
+
+            package = mkOption {
+              type = types.package;
+              default = pkgs.nebula;
+              defaultText = "pkgs.nebula";
+              description = "Nebula derivation to use.";
+            };
+
+            ca = mkOption {
+              type = types.path;
+              description = "Path to the certificate authority certificate.";
+              example = "/etc/nebula/ca.crt";
+            };
+
+            cert = mkOption {
+              type = types.path;
+              description = "Path to the host certificate.";
+              example = "/etc/nebula/host.crt";
+            };
+
+            key = mkOption {
+              type = types.path;
+              description = "Path to the host key.";
+              example = "/etc/nebula/host.key";
+            };
+
+            staticHostMap = mkOption {
+              type = types.attrsOf (types.listOf (types.str));
+              default = {};
+              description = ''
+                The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
+                A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
+              '';
+              example = literalExample ''
+                { "192.168.100.1" = [ "100.64.22.11:4242" ]; }
+              '';
+            };
+
+            isLighthouse = mkOption {
+              type = types.bool;
+              default = false;
+              description = "Whether this node is a lighthouse.";
+            };
+
+            lighthouses = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              description = ''
+                List of IPs of lighthouse hosts this node should report to and query from. This should be empty on lighthouse
+                nodes. The IPs should be the lighthouse's Nebula IPs, not their external IPs.
+              '';
+              example = ''[ "192.168.100.1" ]'';
+            };
+
+            listen.host = mkOption {
+              type = types.str;
+              default = "0.0.0.0";
+              description = "IP address to listen on.";
+            };
+
+            listen.port = mkOption {
+              type = types.port;
+              default = 4242;
+              description = "Port number to listen on.";
+            };
+
+            tun.disable = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root).
+              '';
+            };
+
+            tun.device = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = "Name of the tun device. Defaults to nebula.\${networkName}.";
+            };
+
+            firewall.outbound = mkOption {
+              type = types.listOf types.attrs;
+              default = [];
+              description = "Firewall rules for outbound traffic.";
+              example = ''[ { port = "any"; proto = "any"; host = "any"; } ]'';
+            };
+
+            firewall.inbound = mkOption {
+              type = types.listOf types.attrs;
+              default = [];
+              description = "Firewall rules for inbound traffic.";
+              example = ''[ { port = "any"; proto = "any"; host = "any"; } ]'';
+            };
+
+            settings = mkOption {
+              type = format.type;
+              default = {};
+              description = ''
+                Nebula configuration. Refer to
+                <link xlink:href="https://github.com/slackhq/nebula/blob/master/examples/config.yml"/>
+                for details on supported values.
+              '';
+              example = literalExample ''
+                {
+                  lighthouse.dns = {
+                    host = "0.0.0.0";
+                    port = 53;
+                  };
+                }
+              '';
+            };
+          };
+        });
+      };
+    };
+  };
+
+  # Implementation
+  config = mkIf (enabledNetworks != {}) {
+    systemd.services = mkMerge (mapAttrsToList (netName: netCfg:
+      let
+        networkId = nameToId netName;
+        settings = recursiveUpdate {
+          pki = {
+            ca = netCfg.ca;
+            cert = netCfg.cert;
+            key = netCfg.key;
+          };
+          static_host_map = netCfg.staticHostMap;
+          lighthouse = {
+            am_lighthouse = netCfg.isLighthouse;
+            hosts = netCfg.lighthouses;
+          };
+          listen = {
+            host = netCfg.listen.host;
+            port = netCfg.listen.port;
+          };
+          tun = {
+            disabled = netCfg.tun.disable;
+            dev = if (netCfg.tun.device != null) then netCfg.tun.device else "nebula.${netName}";
+          };
+          firewall = {
+            inbound = netCfg.firewall.inbound;
+            outbound = netCfg.firewall.outbound;
+          };
+        } netCfg.settings;
+        configFile = format.generate "nebula-config-${netName}.yml" settings;
+        in
+        {
+          # Create systemd service for Nebula.
+          "nebula@${netName}" = {
+            description = "Nebula VPN service for ${netName}";
+            wants = [ "basic.target" ];
+            after = [ "basic.target" "network.target" ];
+            before = [ "sshd.service" ];
+            wantedBy = [ "multi-user.target" ];
+            serviceConfig = mkMerge [
+              {
+                Type = "simple";
+                Restart = "always";
+                ExecStart = "${netCfg.package}/bin/nebula -config ${configFile}";
+              }
+              # The service needs to launch as root to access the tun device, if it's enabled.
+              (mkIf netCfg.tun.disable {
+                User = networkId;
+                Group = networkId;
+              })
+            ];
+          };
+        }) enabledNetworks);
+
+    # Open the chosen ports for UDP.
+    networking.firewall.allowedUDPPorts =
+      unique (mapAttrsToList (netName: netCfg: netCfg.listen.port) enabledNetworks);
+
+    # Create the service users and groups.
+    users.users = mkMerge (mapAttrsToList (netName: netCfg:
+      mkIf netCfg.tun.disable {
+        ${nameToId netName} = {
+          group = nameToId netName;
+          description = "Nebula service user for network ${netName}";
+          isSystemUser = true;
+        };
+      }) enabledNetworks);
+
+    users.groups = mkMerge (mapAttrsToList (netName: netCfg:
+      mkIf netCfg.tun.disable {
+        ${nameToId netName} = {};
+      }) enabledNetworks);
+  };
+}
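A usage sketch for a non-lighthouse node; the network name, certificate paths and addresses are placeholders:

    services.nebula.networks.mesh = {
      ca   = "/etc/nebula/ca.crt";
      cert = "/etc/nebula/host.crt";
      key  = "/etc/nebula/host.key";
      staticHostMap = { "192.168.100.1" = [ "203.0.113.10:4242" ]; };  # lighthouse's public endpoint
      lighthouses = [ "192.168.100.1" ];                               # lighthouse's Nebula IP
      firewall.outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
      firewall.inbound  = [ { port = "any"; proto = "any"; host = "any"; } ];
    };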
diff --git a/nixpkgs/nixos/modules/services/networking/networkmanager.nix b/nixpkgs/nixos/modules/services/networking/networkmanager.nix
index 119bd09e2fdb..135f29be58c0 100644
--- a/nixpkgs/nixos/modules/services/networking/networkmanager.nix
+++ b/nixpkgs/nixos/modules/services/networking/networkmanager.nix
@@ -484,6 +484,8 @@ in {
       })
     ];
 
+    boot.kernelModules = [ "ctr" ];
+
     security.polkit.extraConfig = polkitConf;
 
     services.dbus.packages = cfg.packages
diff --git a/nixpkgs/nixos/modules/services/networking/pixiecore.nix b/nixpkgs/nixos/modules/services/networking/pixiecore.nix
index 85aa40784af8..d2642c82c2d3 100644
--- a/nixpkgs/nixos/modules/services/networking/pixiecore.nix
+++ b/nixpkgs/nixos/modules/services/networking/pixiecore.nix
@@ -93,6 +93,7 @@ in
     users.users.pixiecore = {
       description = "Pixiecore daemon user";
       group = "pixiecore";
+      isSystemUser = true;
     };
 
     networking.firewall = mkIf cfg.openFirewall {
diff --git a/nixpkgs/nixos/modules/services/networking/pleroma.nix b/nixpkgs/nixos/modules/services/networking/pleroma.nix
index 9b2bf9f61244..2687230a158d 100644
--- a/nixpkgs/nixos/modules/services/networking/pleroma.nix
+++ b/nixpkgs/nixos/modules/services/networking/pleroma.nix
@@ -75,6 +75,7 @@ in {
         description = "Pleroma user";
         home = cfg.stateDir;
         extraGroups = [ cfg.group ];
+        isSystemUser = true;
       };
       groups."${cfg.group}" = {};
     };
diff --git a/nixpkgs/nixos/modules/services/networking/privoxy.nix b/nixpkgs/nixos/modules/services/networking/privoxy.nix
index 7c22b7d09b9b..df818baa465d 100644
--- a/nixpkgs/nixos/modules/services/networking/privoxy.nix
+++ b/nixpkgs/nixos/modules/services/networking/privoxy.nix
@@ -242,7 +242,7 @@ in
           "default.action"
         ] ++ optional cfg.inspectHttps (toString inspectAction);
     } // (optionalAttrs cfg.enableTor {
-      forward-socks5 = "127.0.0.1:9063 .";
+      forward-socks5 = "/ 127.0.0.1:9063 .";
       toggle = true;
       enable-remote-toggle = false;
       enable-edit-actions = false;
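With the corrected pattern argument, the rendered Privoxy directive reads:

    forward-socks5 / 127.0.0.1:9063 .

The leading "/" matches all URLs and the trailing "." means no further HTTP forwarding, which is the documented forward-socks5 syntax the previous value was missing.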
diff --git a/nixpkgs/nixos/modules/services/networking/quagga.nix b/nixpkgs/nixos/modules/services/networking/quagga.nix
deleted file mode 100644
index 7c169fe62d8d..000000000000
--- a/nixpkgs/nixos/modules/services/networking/quagga.nix
+++ /dev/null
@@ -1,185 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-
-  cfg = config.services.quagga;
-
-  services = [ "babel" "bgp" "isis" "ospf6" "ospf" "pim" "rip" "ripng" ];
-  allServices = services ++ [ "zebra" ];
-
-  isEnabled = service: cfg.${service}.enable;
-
-  daemonName = service: if service == "zebra" then service else "${service}d";
-
-  configFile = service:
-    let
-      scfg = cfg.${service};
-    in
-      if scfg.configFile != null then scfg.configFile
-      else pkgs.writeText "${daemonName service}.conf"
-        ''
-          ! Quagga ${daemonName service} configuration
-          !
-          hostname ${config.networking.hostName}
-          log syslog
-          service password-encryption
-          !
-          ${scfg.config}
-          !
-          end
-        '';
-
-  serviceOptions = service:
-    {
-      enable = mkEnableOption "the Quagga ${toUpper service} routing protocol";
-
-      configFile = mkOption {
-        type = types.nullOr types.path;
-        default = null;
-        example = "/etc/quagga/${daemonName service}.conf";
-        description = ''
-          Configuration file to use for Quagga ${daemonName service}.
-          By default the NixOS generated files are used.
-        '';
-      };
-
-      config = mkOption {
-        type = types.lines;
-        default = "";
-        example =
-          let
-            examples = {
-              rip = ''
-                router rip
-                  network 10.0.0.0/8
-              '';
-
-              ospf = ''
-                router ospf
-                  network 10.0.0.0/8 area 0
-              '';
-
-              bgp = ''
-                router bgp 65001
-                  neighbor 10.0.0.1 remote-as 65001
-              '';
-            };
-          in
-            examples.${service} or "";
-        description = ''
-          ${daemonName service} configuration statements.
-        '';
-      };
-
-      vtyListenAddress = mkOption {
-        type = types.str;
-        default = "127.0.0.1";
-        description = ''
-          Address to bind to for the VTY interface.
-        '';
-      };
-
-      vtyListenPort = mkOption {
-        type = types.nullOr types.int;
-        default = null;
-        description = ''
-          TCP Port to bind to for the VTY interface.
-        '';
-      };
-    };
-
-in
-
-{
-
-  ###### interface
-  imports = [
-    {
-      options.services.quagga = {
-        zebra = (serviceOptions "zebra") // {
-          enable = mkOption {
-            type = types.bool;
-            default = any isEnabled services;
-            description = ''
-              Whether to enable the Zebra routing manager.
-
-              The Zebra routing manager is automatically enabled
-              if any routing protocols are configured.
-            '';
-          };
-        };
-      };
-    }
-    { options.services.quagga = (genAttrs services serviceOptions); }
-  ];
-
-  ###### implementation
-
-  config = mkIf (any isEnabled allServices) {
-
-    environment.systemPackages = [
-      pkgs.quagga               # for the vtysh tool
-    ];
-
-    users.users.quagga = {
-      description = "Quagga daemon user";
-      isSystemUser = true;
-      group = "quagga";
-    };
-
-    users.groups = {
-      quagga = {};
-      # Members of the quaggavty group can use vtysh to inspect the Quagga daemons
-      quaggavty = { members = [ "quagga" ]; };
-    };
-
-    systemd.services =
-      let
-        quaggaService = service:
-          let
-            scfg = cfg.${service};
-            daemon = daemonName service;
-          in
-            nameValuePair daemon ({
-              wantedBy = [ "multi-user.target" ];
-              restartTriggers = [ (configFile service) ];
-
-              serviceConfig = {
-                Type = "forking";
-                PIDFile = "/run/quagga/${daemon}.pid";
-                ExecStart = "@${pkgs.quagga}/libexec/quagga/${daemon} ${daemon} -d -f ${configFile service}"
-                  + optionalString (scfg.vtyListenAddress != "") " -A ${scfg.vtyListenAddress}"
-                  + optionalString (scfg.vtyListenPort != null) " -P ${toString scfg.vtyListenPort}";
-                ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
-                Restart = "on-abort";
-              };
-            } // (
-              if service == "zebra" then
-                {
-                  description = "Quagga Zebra routing manager";
-                  unitConfig.Documentation = "man:zebra(8)";
-                  after = [ "network.target" ];
-                  preStart = ''
-                    install -m 0755 -o quagga -g quagga -d /run/quagga
-
-                    ${pkgs.iproute2}/bin/ip route flush proto zebra
-                  '';
-                }
-              else
-                {
-                  description = "Quagga ${toUpper service} routing daemon";
-                  unitConfig.Documentation = "man:${daemon}(8) man:zebra(8)";
-                  bindsTo = [ "zebra.service" ];
-                  after = [ "network.target" "zebra.service" ];
-                }
-            ));
-       in
-         listToAttrs (map quaggaService (filter isEnabled allServices));
-
-  };
-
-  meta.maintainers = with lib.maintainers; [ tavyc ];
-
-}
diff --git a/nixpkgs/nixos/modules/services/networking/spacecookie.nix b/nixpkgs/nixos/modules/services/networking/spacecookie.nix
index c4d06df6ad4a..e0bef9e9628d 100644
--- a/nixpkgs/nixos/modules/services/networking/spacecookie.nix
+++ b/nixpkgs/nixos/modules/services/networking/spacecookie.nix
@@ -4,10 +4,22 @@ with lib;
 
 let
   cfg = config.services.spacecookie;
-  configFile = pkgs.writeText "spacecookie.json" (lib.generators.toJSON {} {
-    inherit (cfg) hostname port root;
-  });
+
+  spacecookieConfig = {
+    listen = {
+      inherit (cfg) port;
+    };
+  } // cfg.settings;
+
+  format = pkgs.formats.json {};
+
+  configFile = format.generate "spacecookie.json" spacecookieConfig;
+
 in {
+  imports = [
+    (mkRenamedOptionModule [ "services" "spacecookie" "root" ] [ "services" "spacecookie" "settings" "root" ])
+    (mkRenamedOptionModule [ "services" "spacecookie" "hostname" ] [ "services" "spacecookie" "settings" "hostname" ])
+  ];
 
   options = {
 
@@ -15,32 +27,149 @@ in {
 
       enable = mkEnableOption "spacecookie";
 
-      hostname = mkOption {
-        type = types.str;
-        default = "localhost";
-        description = "The hostname the service is reachable via. Clients will use this hostname for further requests after loading the initial gopher menu.";
+      package = mkOption {
+        type = types.package;
+        default = pkgs.spacecookie;
+        defaultText = literalExample "pkgs.spacecookie";
+        example = literalExample "pkgs.haskellPackages.spacecookie";
+        description = ''
+          The spacecookie derivation to use. This can be used to
+          override the used package or to use another version.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to open the necessary port in the firewall for spacecookie.
+        '';
       };
 
       port = mkOption {
         type = types.port;
         default = 70;
-        description = "Port the gopher service should be exposed on.";
+        description = ''
+          Port the gopher service should be exposed on.
+        '';
+      };
+
+      address = mkOption {
+        type = types.str;
+        default = "[::]";
+        description = ''
+          Address to listen on. Must be in the
+          <literal>ListenStream=</literal> syntax of
+          <link xlink:href="https://www.freedesktop.org/software/systemd/man/systemd.socket.html">systemd.socket(5)</link>.
+        '';
       };
 
-      root = mkOption {
-        type = types.path;
-        default = "/srv/gopher";
-        description = "The root directory spacecookie serves via gopher.";
+      settings = mkOption {
+        type = types.submodule {
+          freeformType = format.type;
+
+          options.hostname = mkOption {
+            type = types.str;
+            default = "localhost";
+            description = ''
+              The hostname the service is reachable via. Clients
+              will use this hostname for further requests after
+              loading the initial gopher menu.
+            '';
+          };
+
+          options.root = mkOption {
+            type = types.path;
+            default = "/srv/gopher";
+            description = ''
+              The directory spacecookie should serve via gopher.
+              Files in there need to be world-readable since
+              the spacecookie service file sets
+              <literal>DynamicUser=true</literal>.
+            '';
+          };
+
+          options.log = {
+            enable = mkEnableOption "logging for spacecookie"
+              // { default = true; example = false; };
+
+            hide-ips = mkOption {
+              type = types.bool;
+              default = true;
+              description = ''
+                If enabled, spacecookie will hide personal
+                information of users like IP addresses from
+                log output.
+              '';
+            };
+
+            hide-time = mkOption {
+              type = types.bool;
+              # since we are starting with systemd anyways
+              # we deviate from the default behavior here:
+              # journald will add timestamps, so no need
+              # to double up.
+              default = true;
+              description = ''
+                If enabled, spacecookie will not print timestamps
+                at the beginning of every log line.
+              '';
+            };
+
+            level = mkOption {
+              type = types.enum [
+                "info"
+                "warn"
+                "error"
+              ];
+              default = "info";
+              description = ''
+                Log level for the spacecookie service.
+              '';
+            };
+          };
+        };
+
+        description = ''
+          Settings for spacecookie. The settings set here are
+          directly translated to the spacecookie JSON config
+          file. See
+          <link xlink:href="https://sternenseemann.github.io/spacecookie/spacecookie.json.5.html">spacecookie.json(5)</link>
+          for explanations of all options.
+        '';
       };
     };
   };
 
   config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !(cfg.settings ? user);
+        message = ''
+          spacecookie is started as a normal user, so the setuid
+          feature doesn't work. If you want to run spacecookie as
+          a specific user, set:
+          systemd.services.spacecookie.serviceConfig = {
+            DynamicUser = false;
+            User = "youruser";
+            Group = "yourgroup";
+          }
+        '';
+      }
+      {
+        assertion = !(cfg.settings ? listen || cfg.settings ? port);
+        message = ''
+          The NixOS spacecookie module uses socket activation,
+          so the listen options have no effect. Use the port
+          and address options in services.spacecookie instead.
+        '';
+      }
+    ];
 
     systemd.sockets.spacecookie = {
       description = "Socket for the Spacecookie Gopher Server";
       wantedBy = [ "sockets.target" ];
-      listenStreams = [ "[::]:${toString cfg.port}" ];
+      listenStreams = [ "${cfg.address}:${toString cfg.port}" ];
       socketConfig = {
         BindIPv6Only = "both";
       };
@@ -53,7 +182,7 @@ in {
 
       serviceConfig = {
         Type = "notify";
-        ExecStart = "${pkgs.haskellPackages.spacecookie}/bin/spacecookie ${configFile}";
+        ExecStart = "${lib.getBin cfg.package}/bin/spacecookie ${configFile}";
         FileDescriptorStoreMax = 1;
 
         DynamicUser = true;
@@ -79,5 +208,9 @@ in {
         RestrictAddressFamilies = "AF_UNIX AF_INET6";
       };
     };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
   };
 }
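A sketch of the module after the switch to freeform settings; the hostname is a placeholder:

    services.spacecookie = {
      enable = true;
      openFirewall = true;
      settings = {
        hostname = "gopher.example.org";   # placeholder
        root = "/srv/gopher";              # must be world-readable (DynamicUser)
        log.level = "warn";
      };
    };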
diff --git a/nixpkgs/nixos/modules/services/security/fprintd.nix b/nixpkgs/nixos/modules/services/security/fprintd.nix
index 48f8a9616c3e..fe0fba5b45d7 100644
--- a/nixpkgs/nixos/modules/services/security/fprintd.nix
+++ b/nixpkgs/nixos/modules/services/security/fprintd.nix
@@ -5,6 +5,7 @@ with lib;
 let
 
   cfg = config.services.fprintd;
+  fprintdPkg = if cfg.tod.enable then pkgs.fprintd-tod else pkgs.fprintd;
 
 in
 
@@ -17,25 +18,30 @@ in
 
     services.fprintd = {
 
-      enable = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Whether to enable fprintd daemon and PAM module for fingerprint readers handling.
-        '';
-      };
+      enable = mkEnableOption "fprintd daemon and PAM module for fingerprint readers handling";
 
       package = mkOption {
         type = types.package;
-        default = pkgs.fprintd;
-        defaultText = "pkgs.fprintd";
+        default = fprintdPkg;
+        defaultText = "if cfg.tod.enable then pkgs.fprintd-tod else pkgs.fprintd";
         description = ''
           fprintd package to use.
         '';
       };
 
-    };
+      tod = {
+
+        enable = mkEnableOption "Touch OEM Drivers library support";
 
+        driver = mkOption {
+          type = types.package;
+          example = literalExample "pkgs.libfprint-2-tod1-goodix";
+          description = ''
+            Touch OEM Drivers (TOD) package to use.
+          '';
+        };
+      };
+    };
   };
 
 
@@ -49,6 +55,10 @@ in
 
     systemd.packages = [ cfg.package ];
 
+    systemd.services.fprintd.environment = mkIf cfg.tod.enable {
+      FP_TOD_DRIVERS_DIR = "${cfg.tod.driver}${cfg.tod.driver.driverPath}";
+    };
+
   };
 
 }
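A sketch of enabling TOD support; the driver package is taken from the option's literalExample and stands in for whatever driver matches the reader hardware:

    { pkgs, ... }:
    {
      services.fprintd = {
        enable = true;
        tod = {
          enable = true;
          driver = pkgs.libfprint-2-tod1-goodix;   # pick the driver for your sensor
        };
      };
    }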
diff --git a/nixpkgs/nixos/modules/services/security/oauth2_proxy.nix b/nixpkgs/nixos/modules/services/security/oauth2_proxy.nix
index 77c579279abe..e85fd4b75df4 100644
--- a/nixpkgs/nixos/modules/services/security/oauth2_proxy.nix
+++ b/nixpkgs/nixos/modules/services/security/oauth2_proxy.nix
@@ -90,10 +90,10 @@ in
 
     package = mkOption {
       type = types.package;
-      default = pkgs.oauth2_proxy;
-      defaultText = "pkgs.oauth2_proxy";
+      default = pkgs.oauth2-proxy;
+      defaultText = "pkgs.oauth2-proxy";
       description = ''
-        The package that provides oauth2_proxy.
+        The package that provides oauth2-proxy.
       '';
     };
 
diff --git a/nixpkgs/nixos/modules/services/security/privacyidea.nix b/nixpkgs/nixos/modules/services/security/privacyidea.nix
index f7b40089a932..2696dca4c767 100644
--- a/nixpkgs/nixos/modules/services/security/privacyidea.nix
+++ b/nixpkgs/nixos/modules/services/security/privacyidea.nix
@@ -264,6 +264,7 @@ in
 
       users.users.privacyidea = mkIf (cfg.user == "privacyidea") {
         group = cfg.group;
+        isSystemUser = true;
       };
 
       users.groups.privacyidea = mkIf (cfg.group == "privacyidea") {};
@@ -294,6 +295,7 @@ in
 
       users.users.pi-ldap-proxy = mkIf (cfg.ldap-proxy.user == "pi-ldap-proxy") {
         group = cfg.ldap-proxy.group;
+        isSystemUser = true;
       };
 
       users.groups.pi-ldap-proxy = mkIf (cfg.ldap-proxy.group == "pi-ldap-proxy") {};
diff --git a/nixpkgs/nixos/modules/services/security/sshguard.nix b/nixpkgs/nixos/modules/services/security/sshguard.nix
index 033ff5ef4b5a..53bd9efa5ac7 100644
--- a/nixpkgs/nixos/modules/services/security/sshguard.nix
+++ b/nixpkgs/nixos/modules/services/security/sshguard.nix
@@ -5,6 +5,21 @@ with lib;
 let
   cfg = config.services.sshguard;
 
+  configFile = let
+    args = lib.concatStringsSep " " ([
+      "-afb"
+      "-p info"
+      "-o cat"
+      "-n1"
+    ] ++ (map (name: "-t ${escapeShellArg name}") cfg.services));
+    backend = if config.networking.nftables.enable
+      then "sshg-fw-nft-sets"
+      else "sshg-fw-ipset";
+  in pkgs.writeText "sshguard.conf" ''
+    BACKEND="${pkgs.sshguard}/libexec/${backend}"
+    LOGREADER="LANG=C ${pkgs.systemd}/bin/journalctl ${args}"
+  '';
+
 in {
 
   ###### interface
@@ -85,20 +100,7 @@ in {
 
   config = mkIf cfg.enable {
 
-    environment.etc."sshguard.conf".text = let
-      args = lib.concatStringsSep " " ([
-        "-afb"
-        "-p info"
-        "-o cat"
-        "-n1"
-      ] ++ (map (name: "-t ${escapeShellArg name}") cfg.services));
-      backend = if config.networking.nftables.enable
-        then "sshg-fw-nft-sets"
-        else "sshg-fw-ipset";
-    in ''
-      BACKEND="${pkgs.sshguard}/libexec/${backend}"
-      LOGREADER="LANG=C ${pkgs.systemd}/bin/journalctl ${args}"
-    '';
+    environment.etc."sshguard.conf".source = configFile;
 
     systemd.services.sshguard = {
       description = "SSHGuard brute-force attacks protection system";
@@ -107,6 +109,8 @@ in {
       after = [ "network.target" ];
       partOf = optional config.networking.firewall.enable "firewall.service";
 
+      restartTriggers = [ configFile ];
+
       path = with pkgs; if config.networking.nftables.enable
         then [ nftables iproute2 systemd ]
         else [ iptables ipset iproute2 systemd ];
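For reference, a minimal configuration exercising the new configFile/restartTriggers path might look like this (the watched service identifier is an assumption):

    services.sshguard = {
      enable = true;
      services = [ "sshd" ];   # journalctl -t identifiers fed into the -t flags above
    };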
diff --git a/nixpkgs/nixos/modules/services/security/step-ca.nix b/nixpkgs/nixos/modules/services/security/step-ca.nix
new file mode 100644
index 000000000000..64eee11f5880
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/step-ca.nix
@@ -0,0 +1,134 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.step-ca;
+  settingsFormat = (pkgs.formats.json { });
+in
+{
+  meta.maintainers = with lib.maintainers; [ mohe2015 ];
+
+  options = {
+    services.step-ca = {
+      enable = lib.mkEnableOption "the smallstep certificate authority server";
+      openFirewall = lib.mkEnableOption "opening the certificate authority server port";
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.step-ca;
+        description = "Which step-ca package to use.";
+      };
+      address = lib.mkOption {
+        type = lib.types.str;
+        example = "127.0.0.1";
+        description = ''
+          The address (without port) the certificate authority should listen on.
+          This combined with <option>services.step-ca.port</option> overrides <option>services.step-ca.settings.address</option>.
+        '';
+      };
+      port = lib.mkOption {
+        type = lib.types.port;
+        example = 8443;
+        description = ''
+          The port the certificate authority should listen on.
+          This combined with <option>services.step-ca.address</option> overrides <option>services.step-ca.settings.address</option>.
+        '';
+      };
+      settings = lib.mkOption {
+        type = with lib.types; attrsOf anything;
+        description = ''
+          Settings that go into <filename>ca.json</filename>. See
+          <link xlink:href="https://smallstep.com/docs/step-ca/configuration">
+          the step-ca manual</link> for more information. The easiest way to
+          configure this module would be to run <literal>step ca init</literal>
+          to generate <filename>ca.json</filename> and then import it using
+          <literal>builtins.fromJSON</literal>.
+          <link xlink:href="https://smallstep.com/docs/step-cli/basic-crypto-operations#run-an-offline-x509-certificate-authority">This article</link>
+          may also be useful if you want to customize certain aspects of
+          certificate generation for your CA.
+          You need to change the database storage path to <filename>/var/lib/step-ca/db</filename>.
+
+          <warning>
+            <para>
+              The <option>services.step-ca.settings.address</option> option
+              will be ignored and overwritten by
+              <option>services.step-ca.address</option> and
+              <option>services.step-ca.port</option>.
+            </para>
+          </warning>
+        '';
+      };
+      intermediatePasswordFile = lib.mkOption {
+        type = lib.types.path;
+        example = "/run/keys/smallstep-password";
+        description = ''
+          Path to the file containing the password for the intermediate
+          certificate private key.
+
+          <warning>
+            <para>
+              Make sure to use a quoted absolute path instead of a path literal
+              to prevent it from being copied to the globally readable Nix
+              store.
+            </para>
+          </warning>
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf config.services.step-ca.enable (
+    let
+      configFile = settingsFormat.generate "ca.json" (cfg.settings // {
+        address = cfg.address + ":" + toString cfg.port;
+      });
+    in
+    {
+      assertions =
+        [
+          {
+            assertion = !lib.isStorePath cfg.intermediatePasswordFile;
+            message = ''
+              <option>services.step-ca.intermediatePasswordFile</option> points to
+              a file in the Nix store. You should use a quoted absolute path to
+              prevent this.
+            '';
+          }
+        ];
+
+      systemd.packages = [ cfg.package ];
+
+      # configuration file indirection is needed to support reloading
+      environment.etc."smallstep/ca.json".source = configFile;
+
+      systemd.services."step-ca" = {
+        wantedBy = [ "multi-user.target" ];
+        restartTriggers = [ configFile ];
+        unitConfig = {
+          ConditionFileNotEmpty = ""; # override upstream
+        };
+        serviceConfig = {
+          Environment = "HOME=%S/step-ca";
+          WorkingDirectory = ""; # override upstream
+          ReadWriteDirectories = ""; # override upstream
+
+          # LoadCredential handles file permission problems arising from the use of DynamicUser.
+          LoadCredential = "intermediate_password:${cfg.intermediatePasswordFile}";
+
+          ExecStart = [
+            "" # override upstream
+            "${cfg.package}/bin/step-ca /etc/smallstep/ca.json --password-file \${CREDENTIALS_DIRECTORY}/intermediate_password"
+          ];
+
+          # ProtectProc = "invisible"; # not supported by upstream yet
+          # ProcSubset = "pid"; # not supported by upstream yet
+          # PrivateUsers = true; # doesn't work with privileged ports therefore not supported by upstream
+
+          DynamicUser = true;
+          StateDirectory = "step-ca";
+        };
+      };
+
+      networking.firewall = lib.mkIf cfg.openFirewall {
+        allowedTCPPorts = [ cfg.port ];
+      };
+    }
+  );
+}
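A usage sketch; the password path is the option's example, and ca.json is assumed to come from a prior `step ca init` run, with the database path adjusted as the settings description requires (the db attribute names are assumptions based on step-ca's generated config):

    services.step-ca = {
      enable = true;
      address = "127.0.0.1";
      port = 8443;
      intermediatePasswordFile = "/run/keys/smallstep-password";
      settings = builtins.fromJSON (builtins.readFile ./ca.json) // {
        db = {
          type = "badgerv2";                    # assumed default backend from step ca init
          dataSource = "/var/lib/step-ca/db";   # required state location for this module
        };
      };
    };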
diff --git a/nixpkgs/nixos/modules/services/ttys/getty.nix b/nixpkgs/nixos/modules/services/ttys/getty.nix
index ecfabef5fb13..2480e681de8d 100644
--- a/nixpkgs/nixos/modules/services/ttys/getty.nix
+++ b/nixpkgs/nixos/modules/services/ttys/getty.nix
@@ -5,17 +5,16 @@ with lib;
 let
   cfg = config.services.getty;
 
-  loginArgs = [
+  baseArgs = [
     "--login-program" "${pkgs.shadow}/bin/login"
   ] ++ optionals (cfg.autologinUser != null) [
     "--autologin" cfg.autologinUser
   ] ++ optionals (cfg.loginOptions != null) [
     "--login-options" cfg.loginOptions
-  ];
+  ] ++ cfg.extraArgs;
 
-  gettyCmd = extraArgs:
-    "@${pkgs.util-linux}/sbin/agetty agetty ${escapeShellArgs loginArgs} "
-      + extraArgs;
+  gettyCmd = args:
+    "@${pkgs.util-linux}/sbin/agetty agetty ${escapeShellArgs baseArgs} ${args}";
 
 in
 
@@ -54,7 +53,16 @@ in
           will not be invoked with a <option>--login-options</option>
           option.
         '';
-        example = "-h darkstar -- \u";
+        example = "-h darkstar -- \\u";
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = ''
+          Additional arguments passed to agetty.
+        '';
+        example = [ "--nohostname" ];
       };
 
       greetingLine = mkOption {
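A sketch of the new extraArgs option alongside autologin; the user name is a placeholder and the flags are standard agetty switches:

    services.getty = {
      autologinUser = "alice";                     # placeholder
      extraArgs = [ "--noclear" "--nohostname" ];
    };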
diff --git a/nixpkgs/nixos/modules/services/web-apps/discourse.nix b/nixpkgs/nixos/modules/services/web-apps/discourse.nix
index 03ea002c9dee..00b58d502574 100644
--- a/nixpkgs/nixos/modules/services/web-apps/discourse.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/discourse.nix
@@ -702,7 +702,7 @@ in
           cp -r ${cfg.package}/share/discourse/config.dist/* /run/discourse/config/
           cp -r ${cfg.package}/share/discourse/public.dist/* /run/discourse/public/
           cp -r ${cfg.package}/share/discourse/plugins.dist/* /run/discourse/plugins/
-          ${lib.concatMapStrings (p: "ln -sf ${p} /run/discourse/plugins/") cfg.plugins}
+          ${lib.concatMapStringsSep "\n" (p: "ln -sf ${p} /run/discourse/plugins/") cfg.plugins}
           ln -sf /var/lib/discourse/uploads /run/discourse/public/uploads
           ln -sf /var/lib/discourse/backups /run/discourse/public/backups
 
@@ -726,7 +726,8 @@ in
           export ADMIN_EMAIL="${cfg.admin.email}"
           export ADMIN_NAME="${cfg.admin.fullName}"
           export ADMIN_USERNAME="${cfg.admin.username}"
-          export ADMIN_PASSWORD="$(<${cfg.admin.passwordFile})"
+          ADMIN_PASSWORD="$(<${cfg.admin.passwordFile})"
+          export ADMIN_PASSWORD
           discourse-rake admin:create_noninteractively
 
           discourse-rake themes:update
@@ -938,7 +939,8 @@ in
               set -o errexit -o pipefail -o nounset -o errtrace
               shopt -s inherit_errexit
 
-              export api_key=$(<'${apiKeyPath}')
+              api_key=$(<'${apiKeyPath}')
+              export api_key
 
               jq <${mail-receiver-json} \
                  '.DISCOURSE_API_KEY = $ENV.api_key' \
diff --git a/nixpkgs/nixos/modules/services/web-apps/mastodon.nix b/nixpkgs/nixos/modules/services/web-apps/mastodon.nix
index 16e8ae2ec0b2..661320b5d00a 100644
--- a/nixpkgs/nixos/modules/services/web-apps/mastodon.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/mastodon.nix
@@ -31,6 +31,8 @@ let
   // (if cfg.smtp.authenticate then { SMTP_LOGIN  = cfg.smtp.user; } else {})
   // cfg.extraConfig;
 
+  systemCallsList = [ "@clock" "@cpu-emulation" "@debug" "@keyring" "@module" "@mount" "@obsolete" "@raw-io" "@reboot" "@resources" "@setuid" "@swap" ];
+
   cfgService = {
     # User and group
     User = cfg.user;
@@ -68,7 +70,6 @@ let
     PrivateMounts = true;
     # System Call Filtering
     SystemCallArchitectures = "native";
-    SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @module @mount @obsolete @reboot @resources @setuid @swap";
   };
 
   envFile = pkgs.writeText "mastodon.env" (lib.concatMapStrings (s: s + "\n") (
@@ -432,6 +433,8 @@ in {
       serviceConfig = {
         Type = "oneshot";
         WorkingDirectory = cfg.package;
+        # System Call Filtering
+        SystemCallFilter = "~" + lib.concatStringsSep " " systemCallsList;
       } // cfgService;
 
       after = [ "network.target" ];
@@ -457,6 +460,8 @@ in {
         Type = "oneshot";
         EnvironmentFile = "/var/lib/mastodon/.secrets_env";
         WorkingDirectory = cfg.package;
+        # System Call Filtering
+        SystemCallFilter = "~" + lib.concatStringsSep " " systemCallsList;
       } // cfgService;
       after = [ "mastodon-init-dirs.service" "network.target" ] ++ (if databaseActuallyCreateLocally then [ "postgresql.service" ] else []);
       wantedBy = [ "multi-user.target" ];
@@ -481,6 +486,8 @@ in {
         # Runtime directory and mode
         RuntimeDirectory = "mastodon-streaming";
         RuntimeDirectoryMode = "0750";
+        # System Call Filtering
+        SystemCallFilter = "~" + lib.concatStringsSep " " (systemCallsList ++ [ "@privileged" ]);
       } // cfgService;
     };
 
@@ -503,6 +510,8 @@ in {
         # Runtime directory and mode
         RuntimeDirectory = "mastodon-web";
         RuntimeDirectoryMode = "0750";
+        # System Call Filtering
+        SystemCallFilter = "~" + lib.concatStringsSep " " (systemCallsList ++ [ "@privileged" ]);
       } // cfgService;
       path = with pkgs; [ file imagemagick ffmpeg ];
     };
@@ -522,6 +531,8 @@ in {
         RestartSec = 20;
         EnvironmentFile = "/var/lib/mastodon/.secrets_env";
         WorkingDirectory = cfg.package;
+        # System Call Filtering
+        SystemCallFilter = "~" + lib.concatStringsSep " " (systemCallsList ++ [ "@privileged" ]);
       } // cfgService;
       path = with pkgs; [ file imagemagick ffmpeg ];
     };
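With the shared list factored out, the units should end up with roughly this rendered directive (the web, streaming and sidekiq services additionally append @privileged):

    SystemCallFilter=~@clock @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @resources @setuid @swap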
diff --git a/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix b/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix
index 9a541aba6e43..545deaa905f2 100644
--- a/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix
@@ -10,7 +10,7 @@ let
     extensions = { enabled, all }:
       (with all;
         enabled
-        ++ optional (!cfg.disableImagemagick) imagick
+        ++ optional cfg.enableImagemagick imagick
         # Optionally enabled depending on caching settings
         ++ optional cfg.caching.apcu apcu
         ++ optional cfg.caching.redis redis
@@ -63,6 +63,9 @@ in {
       Further details about this can be found in the `Nextcloud`-section of the NixOS-manual
      (which can be opened e.g. by running `nixos-help`).
     '')
+    (mkRemovedOptionModule [ "services" "nextcloud" "disableImagemagick" ] ''
+      Use services.nextcloud.enableImagemagick instead.
+    '')
   ];
 
   options.services.nextcloud = {
@@ -303,16 +306,14 @@ in {
       };
     };
 
-    disableImagemagick = mkOption {
-      type = types.bool;
-      default = false;
-      description = ''
-        Whether to not load the ImageMagick module into PHP.
+    enableImagemagick = mkEnableOption ''
+        the ImageMagick module in PHP.
         This is used by the theming app and for generating previews of certain images (e.g. SVG and HEIF).
         You may want to disable it for increased security. In that case, previews will still be available
         for some images (e.g. JPEG and PNG).
         See https://github.com/nextcloud/server/issues/13099
-      '';
+    '' // {
+      default = true;
     };
 
     caching = {
@@ -607,6 +608,7 @@ in {
         home = "${cfg.home}";
         group = "nextcloud";
         createHome = true;
+        isSystemUser = true;
       };
       users.groups.nextcloud.members = [ "nextcloud" config.services.nginx.user ];
 
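With the rename above ImageMagick is loaded by default, so a configuration that previously set disableImagemagick = true would now opt out explicitly; a sketch (host name illustrative):

  services.nextcloud = {
    enable = true;
    hostName = "cloud.example.org";  # illustrative
    enableImagemagick = false;       # replaces the removed disableImagemagick = true
  };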
diff --git a/nixpkgs/nixos/modules/services/web-servers/minio.nix b/nixpkgs/nixos/modules/services/web-servers/minio.nix
index cd123000f009..381a55faff16 100644
--- a/nixpkgs/nixos/modules/services/web-servers/minio.nix
+++ b/nixpkgs/nixos/modules/services/web-servers/minio.nix
@@ -18,9 +18,9 @@ in
     };
 
     dataDir = mkOption {
-      default = "/var/lib/minio/data";
-      type = types.path;
-      description = "The data directory, for storing the objects.";
+      default = [ "/var/lib/minio/data" ];
+      type = types.listOf types.path;
+      description = "The list of data directories for storing the objects. Use one path for regular operation and the minimum of 4 endpoints for Erasure Code mode.";
     };
 
     configDir = mkOption {
@@ -74,15 +74,14 @@ in
   config = mkIf cfg.enable {
     systemd.tmpfiles.rules = [
       "d '${cfg.configDir}' - minio minio - -"
-      "d '${cfg.dataDir}' - minio minio - -"
-    ];
+    ] ++ (map (dir: "d '${dir}' - minio minio - -") cfg.dataDir);
 
     systemd.services.minio = {
       description = "Minio Object Storage";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
-        ExecStart = "${cfg.package}/bin/minio server --json --address ${cfg.listenAddress} --config-dir=${cfg.configDir} ${cfg.dataDir}";
+        ExecStart = "${cfg.package}/bin/minio server --json --address ${cfg.listenAddress} --config-dir=${cfg.configDir} ${toString cfg.dataDir}";
         Type = "simple";
         User = "minio";
         Group = "minio";
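Because dataDir is now a list, an erasure-coded deployment can be declared directly; a sketch with four illustrative paths (a single entry keeps the previous behaviour):

  services.minio = {
    enable = true;
    # At least four endpoints are needed for Erasure Code mode.
    dataDir = [
      "/mnt/disk1/minio"
      "/mnt/disk2/minio"
      "/mnt/disk3/minio"
      "/mnt/disk4/minio"
    ];
  };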
diff --git a/nixpkgs/nixos/modules/services/web-servers/nginx/default.nix b/nixpkgs/nixos/modules/services/web-servers/nginx/default.nix
index 52fcce6d17b4..18e1263fef5e 100644
--- a/nixpkgs/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixpkgs/nixos/modules/services/web-servers/nginx/default.nix
@@ -157,7 +157,7 @@ let
         proxy_connect_timeout   60;
         proxy_send_timeout      60;
         proxy_read_timeout      60;
-        proxy_http_version      1.0;
+        proxy_http_version      1.1;
         include ${recommendedProxyConfig};
       ''}
 
@@ -249,7 +249,15 @@ let
           + optionalString (ssl && vhost.http2) "http2 "
           + optionalString vhost.default "default_server "
           + optionalString (extraParameters != []) (concatStringsSep " " extraParameters)
-          + ";";
+          + ";"
+          + (if ssl && vhost.http3 then ''
+          # UDP listener for QUIC+HTTP/3
+          listen ${addr}:${toString port} http3 reuseport;
+          # Advertise that HTTP/3 is available
+          add_header Alt-Svc 'h3=":443"';
+          # Sent when QUIC was used
+          add_header QUIC-Status $quic;
+          '' else "");
 
         redirectListen = filter (x: !x.ssl) defaultListen;
 
@@ -676,6 +684,7 @@ in
                 Defines the address and other parameters of the upstream servers.
               '';
               default = {};
+              example = { "127.0.0.1:8000" = {}; };
             };
             extraConfig = mkOption {
               type = types.lines;
@@ -690,6 +699,14 @@ in
           Defines a group of servers to use as proxy target.
         '';
         default = {};
+        example = literalExample ''
+          "backend_server" = {
+            servers = { "127.0.0.1:8000" = {}; };
+            extraConfig = '''
+              keepalive 16;
+            ''';
+          };
+        '';
       };
 
       virtualHosts = mkOption {
@@ -818,7 +835,7 @@ in
         ProtectControlGroups = true;
         RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
         LockPersonality = true;
-        MemoryDenyWriteExecute = !(builtins.any (mod: (mod.allowMemoryWriteExecute or false)) (optionals (cfg.package ? modules) cfg.package.modules));
+        MemoryDenyWriteExecute = !(builtins.any (mod: (mod.allowMemoryWriteExecute or false)) cfg.package.modules);
         RestrictRealtime = true;
         RestrictSUIDSGID = true;
         PrivateMounts = true;
@@ -870,6 +887,7 @@ in
     users.users = optionalAttrs (cfg.user == "nginx") {
       nginx = {
         group = cfg.group;
+        isSystemUser = true;
         uid = config.ids.uids.nginx;
       };
     };
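The proxy_http_version bump to 1.1 is what allows upstream keepalive connections to be reused; a sketch combining it with the upstreams option documented above (names and addresses illustrative):

  services.nginx = {
    enable = true;
    recommendedProxySettings = true;  # emits the proxy_http_version 1.1 block
    upstreams."backend" = {
      servers = { "127.0.0.1:8000" = { }; };
      extraConfig = ''
        keepalive 16;
      '';
    };
    virtualHosts."app.example.org".locations."/".proxyPass = "http://backend";
  };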
diff --git a/nixpkgs/nixos/modules/services/web-servers/nginx/vhost-options.nix b/nixpkgs/nixos/modules/services/web-servers/nginx/vhost-options.nix
index cf211ea9a71b..1f5fe6a368c1 100644
--- a/nixpkgs/nixos/modules/services/web-servers/nginx/vhost-options.nix
+++ b/nixpkgs/nixos/modules/services/web-servers/nginx/vhost-options.nix
@@ -151,6 +151,19 @@ with lib;
       '';
     };
 
+    http3 = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to enable HTTP/3.
+        This requires the <literal>pkgs.nginxQuic</literal> package,
+        which can be selected by setting <literal>services.nginx.package = pkgs.nginxQuic;</literal>.
+        Note that HTTP/3 support is experimental and
+        <emphasis>not</emphasis> yet recommended for production.
+        Read more at https://quic.nginx.org/
+      '';
+    };
+
     root = mkOption {
       type = types.nullOr types.path;
       default = null;
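As the option description says, the per-vhost flag only has an effect with the QUIC-enabled package and on an SSL listener; a sketch (host name illustrative):

  services.nginx = {
    enable = true;
    package = pkgs.nginxQuic;   # required for the http3 listener
    virtualHosts."example.org" = {
      forceSSL = true;          # the QUIC listener is only added for SSL vhosts
      enableACME = true;
      http3 = true;
    };
  };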
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/default.nix b/nixpkgs/nixos/modules/services/x11/window-managers/default.nix
index 9ca24310e567..53285fbce877 100644
--- a/nixpkgs/nixos/modules/services/x11/window-managers/default.nix
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/default.nix
@@ -15,6 +15,7 @@ in
     ./cwm.nix
     ./clfswm.nix
     ./dwm.nix
+    ./e16.nix
     ./evilwm.nix
     ./exwm.nix
     ./fluxbox.nix
@@ -37,6 +38,7 @@ in
     ./tinywm.nix
     ./twm.nix
     ./windowmaker.nix
+    ./wmderland.nix
     ./wmii.nix
     ./xmonad.nix
     ./yeahwm.nix
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/e16.nix b/nixpkgs/nixos/modules/services/x11/window-managers/e16.nix
new file mode 100644
index 000000000000..3e1a22c4dabd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/e16.nix
@@ -0,0 +1,26 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.e16;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.e16.enable = mkEnableOption "e16";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "E16";
+      start = ''
+        ${pkgs.e16}/bin/e16 &
+        waitPID=$!
+      '';
+    };
+
+    environment.systemPackages = [ pkgs.e16 ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/wmderland.nix b/nixpkgs/nixos/modules/services/x11/window-managers/wmderland.nix
new file mode 100644
index 000000000000..a6864a827719
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/wmderland.nix
@@ -0,0 +1,61 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.wmderland;
+in
+
+{
+  options.services.xserver.windowManager.wmderland = {
+    enable = mkEnableOption "wmderland";
+
+    extraSessionCommands = mkOption {
+      default = "";
+      type = types.lines;
+      description = ''
+        Shell commands executed just before wmderland is started.
+      '';
+    };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = with pkgs; [
+        rofi
+        dunst
+        light
+        hsetroot
+        feh
+        rxvt-unicode
+      ];
+      example = literalExample ''
+        with pkgs; [
+          rofi
+          dunst
+          light
+          hsetroot
+          feh
+          rxvt-unicode
+        ]
+      '';
+      description = ''
+        Extra packages to be installed system wide.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "wmderland";
+      start = ''
+        ${cfg.extraSessionCommands}
+
+        ${pkgs.wmderland}/bin/wmderland &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [
+      pkgs.wmderland pkgs.wmderlandc
+    ] ++ cfg.extraPackages;
+  };
+}
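Both new window-manager modules follow the usual enable pattern; a usage sketch for wmderland (the session command is illustrative):

  services.xserver.enable = true;
  services.xserver.windowManager.wmderland = {
    enable = true;
    extraSessionCommands = ''
      hsetroot -solid '#282c34' &
    '';
  };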