Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/configuration/adding-custom-packages.section.md | 4
-rw-r--r--  nixos/doc/manual/development/unit-handling.section.md | 7
-rw-r--r--  nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md | 5
-rw-r--r--  nixos/doc/manual/release-notes/rl-2205.section.md | 3
-rw-r--r--  nixos/doc/manual/release-notes/rl-2311.section.md | 114
-rw-r--r--  nixos/lib/make-btrfs-fs.nix | 65
-rw-r--r--  nixos/lib/systemd-lib.nix | 6
-rw-r--r--  nixos/lib/test-driver/test_driver/machine.py | 32
-rw-r--r--  nixos/lib/testing/driver.nix | 7
-rw-r--r--  nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix | 20
-rw-r--r--  nixos/maintainers/scripts/lxd/lxd-container-image.nix (renamed from nixos/maintainers/scripts/lxd/lxd-image.nix) | 6
-rw-r--r--  nixos/maintainers/scripts/lxd/lxd-image-inner.nix | 95
-rw-r--r--  nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix | 20
-rw-r--r--  nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix | 27
-rwxr-xr-x  nixos/maintainers/scripts/oci/create-image.sh | 24
-rwxr-xr-x  nixos/maintainers/scripts/oci/upload-image.sh | 100
-rw-r--r--  nixos/modules/config/fonts/packages.nix | 2
-rw-r--r--  nixos/modules/config/terminfo.nix | 38
-rw-r--r--  nixos/modules/config/update-users-groups.pl | 36
-rw-r--r--  nixos/modules/config/users-groups.nix | 54
-rw-r--r--  nixos/modules/hardware/decklink.nix | 16
-rw-r--r--  nixos/modules/hardware/glasgow.nix | 23
-rw-r--r--  nixos/modules/hardware/infiniband.nix | 58
-rw-r--r--  nixos/modules/hardware/video/nvidia.nix | 549
-rw-r--r--  nixos/modules/i18n/input-method/uim.nix | 2
-rw-r--r--  nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix | 1
-rw-r--r--  nixos/modules/installer/cd-dvd/iso-image.nix | 129
-rw-r--r--  nixos/modules/installer/tools/tools.nix | 4
-rw-r--r--  nixos/modules/module-list.nix | 18
-rw-r--r--  nixos/modules/programs/bash/bash.nix | 2
-rw-r--r--  nixos/modules/programs/clash-verge.nix | 14
-rw-r--r--  nixos/modules/programs/dconf.nix | 237
-rw-r--r--  nixos/modules/programs/direnv.nix | 18
-rw-r--r--  nixos/modules/programs/ecryptfs.nix | 31
-rw-r--r--  nixos/modules/programs/environment.nix | 3
-rw-r--r--  nixos/modules/programs/fish.nix | 17
-rw-r--r--  nixos/modules/programs/hyprland.nix | 4
-rw-r--r--  nixos/modules/programs/streamdeck-ui.nix | 2
-rw-r--r--  nixos/modules/programs/tmux.nix | 12
-rw-r--r--  nixos/modules/programs/yazi.nix | 53
-rw-r--r--  nixos/modules/programs/yubikey-touch-detector.nix | 21
-rw-r--r--  nixos/modules/rename.nix | 3
-rw-r--r--  nixos/modules/security/acme/default.md | 4
-rw-r--r--  nixos/modules/security/acme/default.nix | 149
-rw-r--r--  nixos/modules/security/pam.nix | 2
-rw-r--r--  nixos/modules/security/sudo-rs.nix | 296
-rw-r--r--  nixos/modules/security/sudo.nix | 4
-rw-r--r--  nixos/modules/security/wrappers/default.nix | 20
-rw-r--r--  nixos/modules/security/wrappers/wrapper.c | 109
-rw-r--r--  nixos/modules/security/wrappers/wrapper.nix | 4
-rw-r--r--  nixos/modules/services/audio/tts.nix | 1
-rw-r--r--  nixos/modules/services/backup/duplicati.nix | 7
-rw-r--r--  nixos/modules/services/backup/restic.nix | 34
-rw-r--r--  nixos/modules/services/continuous-integration/buildbot/master.nix | 8
-rw-r--r--  nixos/modules/services/continuous-integration/github-runner/options.nix | 8
-rw-r--r--  nixos/modules/services/continuous-integration/github-runner/service.nix | 5
-rw-r--r--  nixos/modules/services/continuous-integration/hail.nix | 61
-rw-r--r--  nixos/modules/services/continuous-integration/woodpecker/agents.nix | 59
-rw-r--r--  nixos/modules/services/databases/surrealdb.nix | 29
-rw-r--r--  nixos/modules/services/editors/emacs.md | 24
-rw-r--r--  nixos/modules/services/finance/odoo.nix | 8
-rw-r--r--  nixos/modules/services/games/xonotic.nix | 198
-rw-r--r--  nixos/modules/services/hardware/auto-cpufreq.nix | 10
-rw-r--r--  nixos/modules/services/hardware/openrgb.nix | 9
-rw-r--r--  nixos/modules/services/home-automation/zigbee2mqtt.nix | 3
-rw-r--r--  nixos/modules/services/logging/logrotate.nix | 12
-rw-r--r--  nixos/modules/services/mail/listmonk.nix | 4
-rw-r--r--  nixos/modules/services/mail/stalwart-mail.nix | 106
-rw-r--r--  nixos/modules/services/matrix/mautrix-telegram.nix | 1
-rw-r--r--  nixos/modules/services/matrix/mautrix-whatsapp.nix | 99
-rw-r--r--  nixos/modules/services/matrix/synapse-log_config.yaml | 25
-rw-r--r--  nixos/modules/services/matrix/synapse.nix | 678
-rw-r--r--  nixos/modules/services/misc/atuin.nix | 2
-rw-r--r--  nixos/modules/services/misc/cfdyndns.nix | 31
-rw-r--r--  nixos/modules/services/misc/forgejo.nix | 668
-rw-r--r--  nixos/modules/services/misc/gitlab.nix | 7
-rw-r--r--  nixos/modules/services/misc/jellyfin.nix | 3
-rw-r--r--  nixos/modules/services/misc/mbpfan.nix | 19
-rw-r--r--  nixos/modules/services/misc/paperless.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/below.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/grafana.nix | 24
-rw-r--r--  nixos/modules/services/monitoring/mimir.nix | 14
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters.nix | 15
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix | 60
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix | 24
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/unbound.nix | 84
-rw-r--r--  nixos/modules/services/monitoring/vmagent.nix | 12
-rw-r--r--  nixos/modules/services/networking/adguardhome.nix | 5
-rw-r--r--  nixos/modules/services/networking/dae.nix | 170
-rw-r--r--  nixos/modules/services/networking/dnscrypt-proxy2.nix | 6
-rw-r--r--  nixos/modules/services/networking/firewall-nftables.nix | 9
-rw-r--r--  nixos/modules/services/networking/frp.nix | 93
-rw-r--r--  nixos/modules/services/networking/frr.nix | 1
-rw-r--r--  nixos/modules/services/networking/jool.nix | 313
-rw-r--r--  nixos/modules/services/networking/knot.nix | 132
-rw-r--r--  nixos/modules/services/networking/mtr-exporter.nix | 111
-rw-r--r--  nixos/modules/services/networking/nat-nftables.nix | 36
-rw-r--r--  nixos/modules/services/networking/networkmanager.nix | 135
-rw-r--r--  nixos/modules/services/networking/nftables.nix | 179
-rw-r--r--  nixos/modules/services/networking/nncp.nix | 131
-rw-r--r--  nixos/modules/services/networking/privoxy.nix | 2
-rw-r--r--  nixos/modules/services/networking/ssh/sshd.nix | 22
-rw-r--r--  nixos/modules/services/networking/tedicross.nix | 100
-rw-r--r--  nixos/modules/services/networking/trust-dns.nix | 5
-rw-r--r--  nixos/modules/services/networking/websockify.nix | 2
-rw-r--r--  nixos/modules/services/networking/wg-quick.nix | 2
-rw-r--r--  nixos/modules/services/networking/wstunnel.nix | 4
-rw-r--r--  nixos/modules/services/search/typesense.nix | 4
-rw-r--r--  nixos/modules/services/security/kanidm.nix | 6
-rw-r--r--  nixos/modules/services/security/vaultwarden/default.nix | 6
-rw-r--r--  nixos/modules/services/web-apps/calibre-web.nix | 7
-rw-r--r--  nixos/modules/services/web-apps/cloudlog.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/galene.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/grocy.nix | 16
-rw-r--r--  nixos/modules/services/web-apps/healthchecks.nix | 45
-rw-r--r--  nixos/modules/services/web-apps/honk.md | 23
-rw-r--r--  nixos/modules/services/web-apps/honk.nix | 153
-rw-r--r--  nixos/modules/services/web-apps/lemmy.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/meme-bingo-web.nix | 93
-rw-r--r--  nixos/modules/services/web-apps/mobilizon.nix | 442
-rw-r--r--  nixos/modules/services/web-apps/netbox.nix | 12
-rw-r--r--  nixos/modules/services/web-apps/plausible.nix | 6
-rw-r--r--  nixos/modules/services/web-apps/tt-rss.nix | 44
-rw-r--r--  nixos/modules/services/web-apps/vikunja.nix | 4
-rw-r--r--  nixos/modules/services/web-servers/caddy/default.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/garage.nix | 9
-rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix | 12
-rw-r--r--  nixos/modules/services/x11/desktop-managers/budgie.nix | 3
-rw-r--r--  nixos/modules/services/x11/desktop-managers/plasma5.nix | 13
-rw-r--r--  nixos/modules/services/x11/display-managers/gdm.nix | 56
-rw-r--r--  nixos/modules/services/x11/display-managers/sddm.nix | 1
-rw-r--r--  nixos/modules/services/x11/window-managers/default.nix | 1
-rw-r--r--  nixos/modules/services/x11/window-managers/oroborus.nix | 25
-rw-r--r--  nixos/modules/services/x11/window-managers/ragnarwm.nix | 33
-rwxr-xr-x  nixos/modules/system/activation/switch-to-configuration.pl | 44
-rw-r--r--  nixos/modules/system/boot/binfmt.nix | 22
-rw-r--r--  nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.sh | 28
-rw-r--r--  nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix | 1
-rw-r--r--  nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix | 6
-rw-r--r--  nixos/modules/system/boot/luksroot.nix | 6
-rw-r--r--  nixos/modules/system/boot/networkd.nix | 13
-rw-r--r--  nixos/modules/system/boot/resolved.nix | 8
-rw-r--r--  nixos/modules/system/boot/stage-1.nix | 9
-rwxr-xr-x  nixos/modules/system/boot/stage-2-init.sh | 5
-rw-r--r--  nixos/modules/system/boot/systemd.nix | 4
-rw-r--r--  nixos/modules/system/boot/systemd/initrd.nix | 16
-rw-r--r--  nixos/modules/system/boot/systemd/user.nix | 4
-rw-r--r--  nixos/modules/tasks/bcache.nix | 12
-rw-r--r--  nixos/modules/tasks/filesystems/zfs.nix | 5
-rw-r--r--  nixos/modules/tasks/network-interfaces.nix | 6
-rw-r--r--  nixos/modules/tasks/swraid.nix | 21
-rw-r--r--  nixos/modules/virtualisation/anbox.nix | 59
-rw-r--r--  nixos/modules/virtualisation/google-compute-config.nix | 2
-rw-r--r--  nixos/modules/virtualisation/lxc-container.nix | 130
-rw-r--r--  nixos/modules/virtualisation/lxc-image-metadata.nix | 104
-rw-r--r--  nixos/modules/virtualisation/lxc-instance-common.nix | 30
-rw-r--r--  nixos/modules/virtualisation/lxd-virtual-machine.nix | 46
-rw-r--r--  nixos/modules/virtualisation/lxd.nix | 129
-rw-r--r--  nixos/modules/virtualisation/oci-common.nix | 60
-rw-r--r--  nixos/modules/virtualisation/oci-config-user.nix | 12
-rw-r--r--  nixos/modules/virtualisation/oci-image.nix | 50
-rw-r--r--  nixos/modules/virtualisation/oci-options.nix | 14
-rw-r--r--  nixos/modules/virtualisation/qemu-vm.nix | 2
-rw-r--r--  nixos/release-combined.nix | 5
-rw-r--r--  nixos/release.nix | 51
-rw-r--r--  nixos/tests/acme.nix | 44
-rw-r--r--  nixos/tests/adguardhome.nix | 1
-rw-r--r--  nixos/tests/akkoma.nix | 5
-rw-r--r--  nixos/tests/all-tests.nix | 25
-rw-r--r--  nixos/tests/anbox.nix | 36
-rw-r--r--  nixos/tests/custom-ca.nix | 4
-rw-r--r--  nixos/tests/dae.nix | 29
-rw-r--r--  nixos/tests/dconf.nix | 34
-rw-r--r--  nixos/tests/discourse.nix | 2
-rw-r--r--  nixos/tests/docker-tools.nix | 2
-rw-r--r--  nixos/tests/dolibarr.nix | 2
-rw-r--r--  nixos/tests/fontconfig-default-fonts.nix | 2
-rw-r--r--  nixos/tests/forgejo.nix | 157
-rw-r--r--  nixos/tests/frp.nix | 86
-rw-r--r--  nixos/tests/honk.nix | 32
-rw-r--r--  nixos/tests/jool.nix | 106
-rw-r--r--  nixos/tests/kea.nix | 51
-rw-r--r--  nixos/tests/kernel-generic.nix | 1
-rw-r--r--  nixos/tests/knot.nix | 140
-rw-r--r--  nixos/tests/lxd/container.nix | 13
-rw-r--r--  nixos/tests/lxd/default.nix | 3
-rw-r--r--  nixos/tests/lxd/nftables.nix | 5
-rw-r--r--  nixos/tests/lxd/preseed.nix | 71
-rw-r--r--  nixos/tests/lxd/virtual-machine.nix | 64
-rw-r--r--  nixos/tests/matrix/synapse-workers.nix | 50
-rw-r--r--  nixos/tests/mobilizon.nix | 44
-rw-r--r--  nixos/tests/noto-fonts.nix | 2
-rw-r--r--  nixos/tests/odoo.nix | 3
-rw-r--r--  nixos/tests/openssh.nix | 33
-rw-r--r--  nixos/tests/plausible.nix | 2
-rw-r--r--  nixos/tests/postgis.nix | 2
-rw-r--r--  nixos/tests/prometheus-exporters.nix | 40
-rw-r--r--  nixos/tests/qgis.nix | 30
-rw-r--r--  nixos/tests/ragnarwm.nix | 32
-rw-r--r--  nixos/tests/restic.nix | 20
-rw-r--r--  nixos/tests/shadow.nix | 2
-rw-r--r--  nixos/tests/stalwart-mail.nix | 117
-rw-r--r--  nixos/tests/sudo-rs.nix | 101
-rw-r--r--  nixos/tests/sudo.nix | 2
-rw-r--r--  nixos/tests/switch-test.nix | 151
-rw-r--r--  nixos/tests/systemd-initrd-swraid.nix | 16
-rw-r--r--  nixos/tests/user-activation-scripts.nix | 3
-rw-r--r--  nixos/tests/user-expiry.nix | 70
-rw-r--r--  nixos/tests/wordpress.nix | 2
-rw-r--r--  nixos/tests/wrappers.nix | 24
-rw-r--r--  nixos/tests/zfs.nix | 7
212 files changed, 7758 insertions, 2122 deletions
diff --git a/nixos/doc/manual/configuration/adding-custom-packages.section.md b/nixos/doc/manual/configuration/adding-custom-packages.section.md
index 89d329550613..2340723e07c6 100644
--- a/nixos/doc/manual/configuration/adding-custom-packages.section.md
+++ b/nixos/doc/manual/configuration/adding-custom-packages.section.md
@@ -44,7 +44,7 @@ environment.systemPackages =
       name = "hello-2.8";
       src = fetchurl {
         url = "mirror://gnu/hello/${name}.tar.gz";
-        sha256 = "0wqd8sjmxfskrflaxywc7gqw7sfawrfvdxd9skxawzfgyy0pzdz6";
+        hash = "sha256-5rd/gffPfa761Kn1tl3myunD8TuM+66oy1O7XqVGDXM=";
       };
     };
   in
@@ -67,7 +67,7 @@ stdenv.mkDerivation rec {
   name = "hello-2.8";
   src = fetchurl {
     url = "mirror://gnu/hello/${name}.tar.gz";
-    sha256 = "0wqd8sjmxfskrflaxywc7gqw7sfawrfvdxd9skxawzfgyy0pzdz6";
+    hash = "sha256-5rd/gffPfa761Kn1tl3myunD8TuM+66oy1O7XqVGDXM=";
   };
 }
 ```
diff --git a/nixos/doc/manual/development/unit-handling.section.md b/nixos/doc/manual/development/unit-handling.section.md
index a7ccb3dbd042..32d44dbfff05 100644
--- a/nixos/doc/manual/development/unit-handling.section.md
+++ b/nixos/doc/manual/development/unit-handling.section.md
@@ -25,8 +25,11 @@ checks:
     since changes in their values are applied by systemd when systemd is
     reloaded.
 
-  - `.mount` units are **reload**ed. These mostly come from the `/etc/fstab`
-    parser.
+  - `.mount` units are **reload**ed if only their `Options` changed. If anything
+    else changed (like `What`), they are **restart**ed, unless they are the mount
+    unit for `/` or `/nix`, in which case they are reloaded to prevent the system
+    from crashing. Note that this applies only to `.mount` units, not to mounts
+    from `/etc/fstab`; those are explained in [](#sec-switching-systems).
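+
+    For example (a sketch; the device and options are illustrative), a mount unit
+    defined directly in the configuration is only reloaded when nothing but its
+    options change between generations:
+
+    ```nix
+    systemd.mounts = [{
+      what = "/dev/disk/by-label/data";
+      where = "/data";
+      type = "btrfs";
+      # Changing only this value results in a reload of data.mount;
+      # changing `what` or `type` would cause a restart instead.
+      options = "noatime,compress=zstd";
+    }];
+    ```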
 
   - `.socket` units are currently ignored. This is to be fixed at a later
     point.
diff --git a/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md b/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md
index 9cbec729803a..5d6d67f1aa92 100644
--- a/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md
+++ b/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md
@@ -21,8 +21,9 @@ If the action is `switch` or `test`, the currently running system is inspected
 and the actions to switch to the new system are calculated. This process takes
 two data sources into account: `/etc/fstab` and the current systemd status.
 Mounts and swaps are read from `/etc/fstab` and the corresponding actions are
-generated. If a new mount is added, for example, the proper `.mount` unit is
-marked to be started. The current systemd state is inspected, the difference
+generated. If, for example, only the options of a mount are modified, the proper `.mount`
+unit is reloaded (or restarted if anything else changed, unless it is the mount unit for
+the root filesystem or the Nix store, which is only ever reloaded). The current systemd state is inspected, the difference
 between the current system and the desired configuration is calculated and
 actions are generated to get to this state. There are a lot of nuances that can
 be controlled by the units which are explained here.
diff --git a/nixos/doc/manual/release-notes/rl-2205.section.md b/nixos/doc/manual/release-notes/rl-2205.section.md
index d4581fe9441c..6f5a807f478a 100644
--- a/nixos/doc/manual/release-notes/rl-2205.section.md
+++ b/nixos/doc/manual/release-notes/rl-2205.section.md
@@ -935,8 +935,7 @@ In addition to numerous new and upgraded packages, this release has the followin
   using the `pomerium-cli` command, you should now install the `pomerium-cli`
   package.
 
-- The option
-  [services.networking.networkmanager.enableFccUnlock](#opt-networking.networkmanager.enableFccUnlock)
+- The option `services.networking.networkmanager.enableFccUnlock`
   was added to support FCC unlock procedures. Since release 1.18.4, the ModemManager
   daemon no longer automatically performs the FCC unlock procedure by default. See
   [the docs](https://modemmanager.org/docs/modemmanager/fcc-unlock/) for more details.
diff --git a/nixos/doc/manual/release-notes/rl-2311.section.md b/nixos/doc/manual/release-notes/rl-2311.section.md
index 623576ce4ff2..af5147e01281 100644
--- a/nixos/doc/manual/release-notes/rl-2311.section.md
+++ b/nixos/doc/manual/release-notes/rl-2311.section.md
@@ -6,12 +6,29 @@
 
 - Support for WiFi6 (IEEE 802.11ax) and WPA3-SAE-PK was enabled in the `hostapd` package, along with a significant rework of the hostapd module.
 
+- LXD now supports virtual machine instances to complement the existing container support.
+
+- The `nixos-rebuild` command has been given a `list-generations` subcommand. See `man nixos-rebuild` for more details.
+
+- [`sudo-rs`], a reimplementation of `sudo` in Rust, is now supported.
+  An experimental new module `security.sudo-rs` was added.
+  Switching to it (via `security.sudo.enable = false; security.sudo-rs.enable = true;`) introduces
+  slight changes in sudo behaviour, due to `sudo-rs`' current limitations:
+  - terminfo-related environment variables aren't preserved for `root` and `wheel`;
+  - `root` and `wheel` are not given the ability to set (or preserve)
+    arbitrary environment variables.
+
+[`sudo-rs`]: https://github.com/memorysafety/sudo-rs/
+
+
 ## New Services {#sec-release-23.11-new-services}
 
 - [MCHPRS](https://github.com/MCHPR/MCHPRS), a multithreaded Minecraft server built for redstone. Available as [services.mchprs](#opt-services.mchprs.enable).
 
 - [acme-dns](https://github.com/joohoi/acme-dns), a limited DNS server to handle ACME DNS challenges easily and securely. Available as [services.acme-dns](#opt-services.acme-dns.enable).
 
+- [frp](https://github.com/fatedier/frp), a fast reverse proxy to help you expose a local server behind a NAT or firewall to the Internet. Available as [services.frp](#opt-services.frp.enable).
+
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
 
- [river](https://github.com/riverwm/river), a dynamic tiling Wayland compositor. Available as [programs.river](#opt-programs.river.enable).
@@ -28,11 +45,17 @@
 
 * [NS-USBLoader](https://github.com/developersu/ns-usbloader/), an all-in-one tool for managing Nintendo Switch homebrew. Available as [programs.ns-usbloader](#opt-programs.ns-usbloader.enable).
 
+- [Mobilizon](https://joinmobilizon.org/), a Fediverse platform for publishing events.
+
 - [Anuko Time Tracker](https://github.com/anuko/timetracker), a simple, easy to use, open source time tracking system. Available as [services.anuko-time-tracker](#opt-services.anuko-time-tracker.enable).
 
+- [Prometheus MySQL exporter](https://github.com/prometheus/mysqld_exporter), a MySQL server exporter for Prometheus. Available as [services.prometheus.exporters.mysqld](#opt-services.prometheus.exporters.mysqld.enable).
+
 - [sitespeed-io](https://sitespeed.io), a tool that can generate metrics (timings, diagnostics) for websites. Available as [services.sitespeed-io](#opt-services.sitespeed-io.enable).
 
-- [Jool](https://nicmx.github.io/Jool/en/index.html), an Open Source implementation of IPv4/IPv6 translation on Linux. Available as [networking.jool.enable](#opt-networking.jool.enable).
+- [stalwart-mail](https://stalw.art), an all-in-one email server (SMTP, IMAP, JMAP). Available as [services.stalwart-mail](#opt-services.stalwart-mail.enable).
+
+- [Jool](https://nicmx.github.io/Jool/en/index.html), a kernelspace NAT64 and SIIT implementation, providing translation between IPv4 and IPv6. Available as [networking.jool.enable](#opt-networking.jool.enable).
 
 - [Apache Guacamole](https://guacamole.apache.org/), a cross-platform, clientless remote desktop gateway. Available as [services.guacamole-server](#opt-services.guacamole-server.enable) and [services.guacamole-client](#opt-services.guacamole-client.enable) services.
 
@@ -48,6 +71,13 @@
 
 - [eris-server](https://codeberg.org/eris/eris-go). [ERIS](https://eris.codeberg.page/) is an encoding for immutable storage and this server provides block exchange as well as content decoding over HTTP and through a FUSE file-system. Available as [services.eris-server](#opt-services.eris-server.enable).
 
+- hardware/infiniband.nix adds InfiniBand subnet manager support using an [opensm](https://github.com/linux-rdma/opensm) systemd template service, instantiated on card GUIDs. The module also adds kernel modules and CLI tooling to help administrators debug and measure performance. Available as [hardware.infiniband.enable](#opt-hardware.infiniband.enable).
+
+- [Honk](https://humungus.tedunangst.com/r/honk), a complete ActivityPub server with minimal setup and support costs.
+  Available as [services.honk](#opt-services.honk.enable).
+
+- [NNCP](http://www.nncpgo.org/). Added nncp-daemon and nncp-caller services. Configuration is set with [programs.nncp.settings](#opt-programs.nncp.settings) and the daemons are enabled at [services.nncp](#opt-services.nncp.caller.enable).
+
 ## Backward Incompatibilities {#sec-release-23.11-incompatibilities}
 
 - The `boot.loader.raspberryPi` options have been marked deprecated, with intent for removal for NixOS 24.11. They had a limited use-case, and do not work like people expect. They required either very old installs ([before mid-2019](https://github.com/NixOS/nixpkgs/pull/62462)) or customized builds out of scope of the standard and generic AArch64 support. That option set never supported the Raspberry Pi 4 family of devices.
@@ -66,12 +96,22 @@
 
 - `python3.pkgs.fetchPypi` (and `python3Packages.fetchPypi`) has been deprecated in favor of top-level `fetchPypi`.
 
+- `pass` no longer contains `password-store.el`. Users should get `password-store.el` from the Emacs Lisp package set `emacs.pkgs.password-store`.
+
+- `services.knot` now supports an RFC 42-style `.settings` option. The change is not 100% compatible with the previous `.extraConfig`.
+
+- `mu` no longer installs `mu4e` files by default. Users should get `mu4e` from the Emacs Lisp package set `emacs.pkgs.mu4e`.
+
 - `mariadb` now defaults to `mariadb_1011` instead of `mariadb_106`, meaning the default version was upgraded from 10.6.x to 10.11.x. See the [upgrade notes](https://mariadb.com/kb/en/upgrading-from-mariadb-10-6-to-mariadb-10-11/) for potential issues.
 
 - `getent` has been moved from `glibc`'s `bin` output to its own dedicated output, reducing closure size for many dependents. Dependents using the `getent` alias should not be affected; others should move from using `glibc.bin` or `getBin glibc` to `getent` (which also improves compatibility with non-glibc platforms).
 
+- The `users.users.<name>.passwordFile` option has been renamed to `users.users.<name>.hashedPasswordFile` to avoid possible confusion. The option is in fact the file-based version of `hashedPassword`, not `password`, and expects a file containing the {manpage}`crypt(3)` hash of the user password.
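+
+  For example (a sketch; the user name and file path are illustrative):
+
+  ```nix
+  users.users.alice = {
+    isNormalUser = true;
+    # The file must contain a crypt(3) hash, e.g. as produced by `mkpasswd`.
+    hashedPasswordFile = "/etc/secrets/alice.passwd";
+  };
+  ```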
+
 - The `services.ananicy.extraRules` option now has the type of `listOf attrs` instead of `string`.
 
+- JACK tools (`jack_*` except `jack_control`) have moved from the `jack2` package to `jack-example-tools`.
+
 - The `matrix-synapse` package & module have undergone some significant internal changes, for most setups no intervention is needed, though:
   - The option [`services.matrix-synapse.package`](#opt-services.matrix-synapse.package) is now read-only. For modifying the package, use an overlay which modifies `matrix-synapse-unwrapped` instead. More on that below.
   - The `enableSystemd` & `enableRedis` arguments have been removed and `matrix-synapse` has been renamed to `matrix-synapse-unwrapped`. Also, several optional dependencies (such as `psycopg2` or `authlib`) have been removed.
@@ -81,6 +121,8 @@
 
 - `etcd` has been updated to 3.5, you will want to read the [3.3 to 3.4](https://etcd.io/docs/v3.5/upgrades/upgrade_3_4/) and [3.4 to 3.5](https://etcd.io/docs/v3.5/upgrades/upgrade_3_5/) upgrade guides
 
+- `gitlab` installations created or updated between versions \[15.11.0, 15.11.2] have an incorrect database schema. This will become a problem when upgrading to `gitlab` >=16.2.0. A workaround for affected users can be found in the [GitLab docs](https://docs.gitlab.com/ee/update/versions/gitlab_16_changes.html#undefined-column-error-upgrading-to-162-or-later).
+
 - `consul` has been updated to `1.16.0`. See the [release note](https://github.com/hashicorp/consul/releases/tag/v1.16.0) for more details. Once a new Consul version has started and upgraded its data directory, it generally cannot be downgraded to the previous version.
 
 - `himalaya` has been updated to `0.8.0`, which drops the native TLS support (in favor of Rustls) and add OAuth 2.0 support. See the [release note](https://github.com/soywod/himalaya/releases/tag/v0.8.0) for more details.
@@ -109,10 +151,17 @@
 
 - The ISC DHCP package and corresponding module have been removed, because they are end of life upstream. See https://www.isc.org/blogs/isc-dhcp-eol/ for details and switch to a different DHCP implementation like kea or dnsmasq.
 
+- `prometheus-unbound-exporter` has been replaced by the Let's Encrypt maintained version, since the previous version was archived. This requires some changes to the module configuration; most notably, `controlInterface` needs migration
+   towards `unbound.host` and requires either the `tcp://` or `unix://` URI scheme.
+
+- `odoo` now defaults to 16, updated from 15.
+
 - `util-linux` is now supported on Darwin and is no longer an alias to `unixtools`. Use the `unixtools.util-linux` package for access to the Apple variants of the utilities.
 
 - `services.keyd` changed API. Now you can create multiple configuration files.
 
+- `baloo`, the file indexer/search engine used by KDE, now has a patch to prevent files from constantly being reindexed when the device IDs of their underlying storage change. This happens frequently when using btrfs or LVM. The patch has not yet been accepted upstream but it provides a significantly improved experience. When upgrading, reset baloo to get a clean index: `balooctl disable ; balooctl purge ; balooctl enable`.
+
 - `services.ddclient` has been removed on the request of the upstream maintainer because it is unmaintained and has bugs. Please switch to a different software like `inadyn` or `knsupdate`.
 
 - The `vlock` program from the `kbd` package has been moved into its own package output and should now be referenced explicitly as `kbd.vlock` or replaced with an alternative such as the standalone `vlock` package or `physlock`.
@@ -139,10 +188,12 @@
 
 - The Caddy module gained a new option named `services.caddy.enableReload` which is enabled by default. It allows reloading the service instead of restarting it, if only a config file has changed. This option must be disabled if you have turned off the [Caddy admin API](https://caddyserver.com/docs/caddyfile/options#admin). If you keep this option enabled, you should consider setting [`grace_period`](https://caddyserver.com/docs/caddyfile/options#grace-period) to a non-infinite value to prevent Caddy from delaying the reload indefinitely.
 
-- mdraid support is now optional. This reduces initramfs size and prevents the potentially undesired automatic detection and activation of software RAID pools. It is disabled by default in new configurations (determined by `stateVersion`), but the appropriate settings will be generated by `nixos-generate-config` when installing to a software RAID device, so the standard installation procedure should be unaffected. If you have custom configs relying on mdraid, ensure that you use `stateVersion` correctly or set `boot.swraid.enable` manually.
+- mdraid support is now optional. This reduces initramfs size and prevents the potentially undesired automatic detection and activation of software RAID pools. It is disabled by default in new configurations (determined by `stateVersion`), but the appropriate settings will be generated by `nixos-generate-config` when installing to a software RAID device, so the standard installation procedure should be unaffected. If you have custom configs relying on mdraid, ensure that you use `stateVersion` correctly or set `boot.swraid.enable` manually. On systems with an updated `stateVersion` we now also emit warnings if `mdadm.conf` does not contain the minimum required configuration necessary to run the dynamically enabled monitoring daemons.
 
 - The `go-ethereum` package has been updated to v1.12.0. This drops support for proof-of-work. Its GraphQL API now encodes all numeric values as hex strings and the GraphQL UI is updated to version 2.0. The default database has changed from `leveldb` to `pebble` but `leveldb` can be forced with the --db.engine=leveldb flag. The `checkpoint-admin` command was [removed along with trusted checkpoints](https://github.com/ethereum/go-ethereum/pull/27147).
 
+- The `aseprite-unfree` package has been upgraded from 1.2.16.3 to 1.2.40. The free version of aseprite has been dropped because it is EOL and the package attribute now points to the unfree version. A maintained fork of the last free version of Aseprite, named 'LibreSprite', is available in the `libresprite` package.
+
 - The default `kops` version is now 1.27.0 and support for 1.24 and older has been dropped.
 
- `pharo` has been updated to latest stable (PharoVM 10.0.5), which is compatible with the latest stable and oldstable images (Pharo 10 and 11). The VM in question is the 64bit Spur. The 32bit version has been dropped due to lack of maintenance. The Cog VM has been deleted because it is severely outdated. Finally, the `pharo-launcher` package has been deleted because it was not compatible with the newer VM, and due to lack of maintenance.
@@ -151,8 +202,30 @@
 
 - Emacs macport version 29 was introduced.
 
+- The option `services.networking.networkmanager.enableFccUnlock` was removed in favor of `networking.networkmanager.fccUnlockScripts`, which allows specifying unlock scripts explicitly. The previous option simply enabled all unlock scripts bundled with ModemManager, which is risky and didn't allow using vendor-provided unlock scripts at all.
+
 - The `html-proofer` package has been updated from major version 3 to major version 5, which includes [breaking changes](https://github.com/gjtorikian/html-proofer/blob/v5.0.8/UPGRADING.md).
 
+- `kratos` has been updated from 0.10.1 to the first stable version 1.0.0, please read the [0.10.1 to 0.11.0](https://github.com/ory/kratos/releases/tag/v0.11.0), [0.11.0 to 0.11.1](https://github.com/ory/kratos/releases/tag/v0.11.1), [0.11.1 to 0.13.0](https://github.com/ory/kratos/releases/tag/v0.13.0) and [0.13.0 to 1.0.0](https://github.com/ory/kratos/releases/tag/v1.0.0) upgrade guides. The most notable breaking change is the introduction of one-time passwords (`code`) and update of the default recovery strategy from `link` to `code`.
+
+- The `hail` NixOS module was removed, as `hail` was unmaintained since 2017.
+
+- Package `noto-fonts-emoji` was renamed to `noto-fonts-color-emoji`;
+  see [#221181](https://github.com/NixOS/nixpkgs/issues/221181).
+
+- Package `pash` was removed due to being archived upstream. Use `powershell` as an alternative.
+
+- `security.sudo.extraRules` now includes `root`'s default rule, with ordering
+  priority 400. This is functionally identical for users not specifying rule
+  order, or relying on `mkBefore` and `mkAfter`, but may impact users calling
+  `mkOrder n` with n ≤ 400.
+
+- `networking.networkmanager.firewallBackend` was removed, as NixOS now uses iptables-nftables-compat even when using iptables; therefore NetworkManager now uses the nftables backend unconditionally.
+
+- `rome` was removed because it is no longer maintained and is succeeded by `biome`.
+
+- The `services.mtr-exporter.target` option has been removed in favor of `services.mtr-exporter.jobs`, which allows specifying multiple targets.
+
 ## Other Notable Changes {#sec-release-23.11-notable-changes}
 
 - The Cinnamon module now enables XDG desktop integration by default. If you are experiencing collisions related to xdg-desktop-portal-gtk you can safely remove `xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];` from your NixOS configuration.
@@ -193,16 +266,28 @@
 Unfortunately all servers supporting new clients (newer version of anki-sync-server, anki's built in sync server and this new rust package) do not support the older sync protocol that was used in the old server, so such old clients will also need updating and in particular the anki package in nixpkgs is also being updated in this release.
 The module update takes care of the new config syntax and the data itself (user login and cards) are compatible, so users of the module will be able to just log in again after updating both client and server without any extra action.
 
+- `services.matrix-synapse` has new options to configure worker processes for matrix-synapse using [`services.matrix-synapse.workers`](#opt-services.matrix-synapse.workers). It's also now possible to configure a local redis server using [`services.matrix-synapse.configureRedisLocally`](#opt-services.matrix-synapse.configureRedisLocally).
+
 - `services.nginx` gained a `defaultListen` option at server-level with support for PROXY protocol listeners, also `proxyProtocol` is now exposed in `services.nginx.virtualHosts.<name>.listen` option. It is now possible to run PROXY listeners and non-PROXY listeners at a server-level, see [#213510](https://github.com/NixOS/nixpkgs/pull/213510/) for more details.
 
+- The `generic-extlinux-compatible` bootloader (and Raspberry Pi with U-Boot) now supports appending secrets to the initramfs.
+
+- `services.restic.backups` now adds wrapper scripts to your system path, which set the same environment variables as the service, so restic operations can easily be run from the command line. This behavior can be disabled by setting `createWrapper` to `false`, per backup configuration.
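+
+  A minimal sketch (the backup name, repository and paths are illustrative):
+
+  ```nix
+  services.restic.backups.local = {
+    repository = "/srv/restic-repo";
+    passwordFile = "/etc/secrets/restic-password";
+    paths = [ "/home" ];
+    createWrapper = false; # opt out of the wrapper script for this backup
+  };
+  ```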
+
 - `services.prometheus.exporters` has a new exporter to monitor electrical power consumption based on PowercapRAPL sensor called [Scaphandre](https://github.com/hubblo-org/scaphandre), see [#239803](https://github.com/NixOS/nixpkgs/pull/239803) for more details.
 
+- The MariaDB C client library was upgraded from 3.2.x to 3.3.x. It is recommended to review the [upstream release notes](https://mariadb.com/kb/en/mariadb-connector-c-33-release-notes/).
+
 - The module `services.calibre-server` has new options to configure the `host`, `port`, `auth.enable`, `auth.mode` and `auth.userDb` path, see [#216497](https://github.com/NixOS/nixpkgs/pull/216497/) for more details.
 
 - `services.prometheus.exporters` has a new [exporter](https://github.com/hipages/php-fpm_exporter) to monitor PHP-FPM processes, see [#240394](https://github.com/NixOS/nixpkgs/pull/240394) for more details.
 
+- `services.github-runner` / `services.github-runners.<name>` gained the option `nodeRuntimes`. The option defaults to `[ "node20" ]`, i.e., the service supports Node.js 20 GitHub Actions only. The list of Node.js versions accepted by `nodeRuntimes` tracks the versions the upstream GitHub Actions runner supports. See [#249103](https://github.com/NixOS/nixpkgs/pull/249103) for details.
+
 - `programs.gnupg.agent.pinentryFlavor` is now set in `/etc/gnupg/gpg-agent.conf`, and will no longer take precedence over a `pinentry-program` set in `~/.gnupg/gpg-agent.conf`.
 
+- `dockerTools.buildImage`, `dockerTools.buildLayeredImage` and `dockerTools.streamLayeredImage` now use `lib.makeOverridable` to allow `dockerTools`-based images to be customized more efficiently at the Nix level.
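+
+  For example (a sketch; the image name and command are illustrative):
+
+  ```nix
+  let
+    base = pkgs.dockerTools.buildImage {
+      name = "hello";
+      config.Cmd = [ "${pkgs.hello}/bin/hello" ];
+    };
+  in
+  # thanks to lib.makeOverridable, arguments can be tweaked without repeating the call
+  base.override { name = "hello-customized"; }
+  ```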
+
 - `services.influxdb2` now supports doing an automatic initial setup and provisioning of users, organizations, buckets and authentication tokens, see [#249502](https://github.com/NixOS/nixpkgs/pull/249502) for more details.
 
 - `wrapHelm` now exposes `passthru.pluginsDir` which can be passed to `helmfile`. For convenience, a top-level package `helmfile-wrapped` has been added, which inherits `passthru.pluginsDir` from `kubernetes-helm-wrapped`. See [#217768](https://github.com/NixOS/nixpkgs/issues/217768) for details.
@@ -211,6 +296,29 @@ The module update takes care of the new config syntax and the data itself (user
 
 - Suricata was upgraded from 6.0 to 7.0 and no longer considers HTTP/2 support as experimental, see [upstream release notes](https://forum.suricata.io/t/suricata-7-0-0-released/3715) for more details.
 
+- `networking.nftables` now has the option `networking.nftables.table.<table>` to create tables
+  and have them be updated atomically, instead of flushing the ruleset.
+
+- `networking.nftables` no longer flushes the entire ruleset on every reload.
+  Use `networking.nftables.flushRuleset = true;` to restore the old behaviour.
+
+- The `cawbird` package is dropped from nixpkgs, as it got broken by the Twitter API closing down and has been abandoned upstream.
+
+- `hardware.nvidia` gained `datacenter` options for enabling NVIDIA Data Center drivers and configuration of NVLink/NVSwitch topologies through `nv-fabricmanager`.
+
+- Certificate generation via the `security.acme` module now limits the number of concurrently running certificate renewal and generation jobs, to avoid spiking resource usage when processing many certificates at once. The limit defaults to *5* and can be adjusted via `maxConcurrentRenewals`. Setting it to *0* disables the limit altogether.
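+
+  For example (a sketch; the option is assumed to live directly under `security.acme`):
+
+  ```nix
+  security.acme.maxConcurrentRenewals = 10; # 0 removes the limit entirely
+  ```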
+
+- The new `boot.bcache.enable` option (enabled by default) allows completely removing `bcache` mount support.
+
+- The module `services.mbpfan` now has the option `aggressive` enabled by default for better heat moderation. You can disable it to fall back to the upstream defaults.
+
+- `security.sudo` now provides two extra options that do not change the
+  module's default behaviour:
+  - `defaultOptions` controls the options used for the default rules;
+  - `keepTerminfo` controls whether `TERMINFO` and `TERMINFO_DIRS` are preserved
+    for `root` and the `wheel` group.
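+
+  A minimal sketch (the values shown are illustrative):
+
+  ```nix
+  security.sudo.defaultOptions = [ "SETENV" ];
+  security.sudo.keepTerminfo = true;
+  ```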
+
+
 ## Nixpkgs internals {#sec-release-23.11-nixpkgs-internals}
 
 - The use of `sourceRoot = "source";`, `sourceRoot = "source/subdir";`, and similar lines in package derivations using the default `unpackPhase` is deprecated as it requires `unpackPhase` to always produce a directory named "source". Use `sourceRoot = src.name`, `sourceRoot = "${src.name}/subdir";`, or `setSourceRoot = "sourceRoot=$(echo */subdir)";` or similar instead.
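+
+  For example (a sketch; the project, owner and hash are placeholders):
+
+  ```nix
+  { lib, stdenv, fetchFromGitHub }:
+
+  stdenv.mkDerivation rec {
+    pname = "example";
+    version = "1.0";
+    src = fetchFromGitHub {
+      owner = "example";
+      repo = "example";
+      rev = "v${version}";
+      hash = lib.fakeHash; # replace with the real hash
+    };
+    # instead of the deprecated sourceRoot = "source/subdir";
+    sourceRoot = "${src.name}/subdir";
+  }
+  ```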
@@ -238,3 +346,5 @@ The module update takes care of the new config syntax and the data itself (user
   ./common/auto-format-root-device.nix ];` When you use the systemd initrd, you
   can automatically format the root device by setting
   `virtualisation.fileSystems."/".autoFormat = true;`.
+
+- The `electron` packages now place their application files in `$out/libexec/electron` instead of `$out/lib/electron`. Packages using electron-builder will fail to build and need to be adjusted by changing `lib` to `libexec`.
diff --git a/nixos/lib/make-btrfs-fs.nix b/nixos/lib/make-btrfs-fs.nix
new file mode 100644
index 000000000000..225666f9a50e
--- /dev/null
+++ b/nixos/lib/make-btrfs-fs.nix
@@ -0,0 +1,65 @@
+# Builds a btrfs image containing a populated /nix/store with the closure
+# of store paths passed in the storePaths parameter, in addition to the
+# contents of a directory that can be populated with commands. The
+# generated image is sized to only fit its contents, with the expectation
+# that a script resizes the filesystem at boot time.
+{ pkgs
+, lib
+# List of derivations to be included
+, storePaths
+# Whether or not to compress the resulting image with zstd
+, compressImage ? false, zstd
+# Shell commands to populate the ./files directory.
+# All files in that directory are copied to the root of the FS.
+, populateImageCommands ? ""
+, volumeLabel
+, uuid ? "44444444-4444-4444-8888-888888888888"
+, btrfs-progs
+}:
+
+let
+  sdClosureInfo = pkgs.buildPackages.closureInfo { rootPaths = storePaths; };
+in
+pkgs.stdenv.mkDerivation {
+  name = "btrfs-fs.img${lib.optionalString compressImage ".zst"}";
+
+  nativeBuildInputs = [ btrfs-progs ] ++ lib.optional compressImage zstd;
+
+  buildCommand =
+    ''
+      ${if compressImage then "img=temp.img" else "img=$out"}
+
+      set -x
+      (
+          mkdir -p ./files
+          ${populateImageCommands}
+      )
+
+      mkdir -p ./rootImage/nix/store
+
+      xargs -I % cp -a --reflink=auto % -t ./rootImage/nix/store/ < ${sdClosureInfo}/store-paths
+      (
+        GLOBIGNORE=".:.."
+        shopt -u dotglob
+
+        for f in ./files/*; do
+            cp -a --reflink=auto -t ./rootImage/ "$f"
+        done
+      )
+
+      cp ${sdClosureInfo}/registration ./rootImage/nix-path-registration
+
+      touch $img
+      mkfs.btrfs -L ${volumeLabel} -U ${uuid} -r ./rootImage --shrink $img
+
+      if ! btrfs check $img; then
+        echo "--- 'btrfs check' failed for BTRFS image ---"
+        return 1
+      fi
+
+      if [ ${builtins.toString compressImage} ]; then
+        echo "Compressing image"
+        zstd -v --no-progress ./$img -o $out
+      fi
+    '';
+}
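
A minimal usage sketch for the new helper (the store path, label and populate commands are illustrative; `callPackage` supplies `pkgs`, `lib`, `btrfs-progs` and `zstd`):

```nix
pkgs.callPackage ./make-btrfs-fs.nix {
  storePaths = [ pkgs.hello ];
  volumeLabel = "nixos";
  populateImageCommands = ''
    mkdir -p ./files/etc
    echo hello > ./files/etc/greeting
  '';
}
```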
diff --git a/nixos/lib/systemd-lib.nix b/nixos/lib/systemd-lib.nix
index 641b47def039..f6535b514065 100644
--- a/nixos/lib/systemd-lib.nix
+++ b/nixos/lib/systemd-lib.nix
@@ -274,7 +274,7 @@ in rec {
       });
     in "${out}/bin/${scriptName}";
 
-  unitConfig = { config, options, ... }: {
+  unitConfig = { config, name, options, ... }: {
     config = {
       unitConfig =
         optionalAttrs (config.requires != [])
@@ -294,9 +294,9 @@ in rec {
         // optionalAttrs (config.requisite != [])
           { Requisite = toString config.requisite; }
         // optionalAttrs (config ? restartTriggers && config.restartTriggers != [])
-          { X-Restart-Triggers = "${pkgs.writeText "X-Restart-Triggers" (toString config.restartTriggers)}"; }
+          { X-Restart-Triggers = "${pkgs.writeText "X-Restart-Triggers-${name}" (toString config.restartTriggers)}"; }
         // optionalAttrs (config ? reloadTriggers && config.reloadTriggers != [])
-          { X-Reload-Triggers = "${pkgs.writeText "X-Reload-Triggers" (toString config.reloadTriggers)}"; }
+          { X-Reload-Triggers = "${pkgs.writeText "X-Reload-Triggers-${name}" (toString config.reloadTriggers)}"; }
         // optionalAttrs (config.description != "") {
           Description = config.description; }
         // optionalAttrs (config.documentation != []) {
diff --git a/nixos/lib/test-driver/test_driver/machine.py b/nixos/lib/test-driver/test_driver/machine.py
index 809fd690d717..2afcbc95c667 100644
--- a/nixos/lib/test-driver/test_driver/machine.py
+++ b/nixos/lib/test-driver/test_driver/machine.py
@@ -736,7 +736,7 @@ class Machine:
         )
         return output
 
-    def wait_until_tty_matches(self, tty: str, regexp: str) -> None:
+    def wait_until_tty_matches(self, tty: str, regexp: str, timeout: int = 900) -> None:
         """Wait until the visible output on the chosen TTY matches regular
         expression. Throws an exception on timeout.
         """
@@ -752,7 +752,7 @@ class Machine:
             return len(matcher.findall(text)) > 0
 
         with self.nested(f"waiting for {regexp} to appear on tty {tty}"):
-            retry(tty_matches)
+            retry(tty_matches, timeout)
 
     def send_chars(self, chars: str, delay: Optional[float] = 0.01) -> None:
         """
@@ -764,7 +764,7 @@ class Machine:
             for char in chars:
                 self.send_key(char, delay, log=False)
 
-    def wait_for_file(self, filename: str) -> None:
+    def wait_for_file(self, filename: str, timeout: int = 900) -> None:
         """
         Waits until the file exists in the machine's file system.
         """
@@ -774,9 +774,11 @@ class Machine:
             return status == 0
 
         with self.nested(f"waiting for file '{filename}'"):
-            retry(check_file)
+            retry(check_file, timeout)
 
-    def wait_for_open_port(self, port: int, addr: str = "localhost") -> None:
+    def wait_for_open_port(
+        self, port: int, addr: str = "localhost", timeout: int = 900
+    ) -> None:
         """
         Wait until a process is listening on the given TCP port and IP address
         (default `localhost`).
@@ -787,9 +789,11 @@ class Machine:
             return status == 0
 
         with self.nested(f"waiting for TCP port {port} on {addr}"):
-            retry(port_is_open)
+            retry(port_is_open, timeout)
 
-    def wait_for_closed_port(self, port: int, addr: str = "localhost") -> None:
+    def wait_for_closed_port(
+        self, port: int, addr: str = "localhost", timeout: int = 900
+    ) -> None:
         """
         Wait until nobody is listening on the given TCP port and IP address
         (default `localhost`).
@@ -800,7 +804,7 @@ class Machine:
             return status != 0
 
         with self.nested(f"waiting for TCP port {port} on {addr} to be closed"):
-            retry(port_is_closed)
+            retry(port_is_closed, timeout)
 
     def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
         return self.systemctl(f"start {jobname}", user)
@@ -974,7 +978,7 @@ class Machine:
         """
         return self._get_screen_text_variants([2])[0]
 
-    def wait_for_text(self, regex: str) -> None:
+    def wait_for_text(self, regex: str, timeout: int = 900) -> None:
         """
         Wait until the supplied regular expressions matches the textual
         contents of the screen by using optical character recognition (see
@@ -997,7 +1001,7 @@ class Machine:
             return False
 
         with self.nested(f"waiting for {regex} to appear on screen"):
-            retry(screen_matches)
+            retry(screen_matches, timeout)
 
     def wait_for_console_text(self, regex: str, timeout: int | None = None) -> None:
         """
@@ -1148,7 +1152,7 @@ class Machine:
         self.send_key("ctrl-alt-delete")
         self.connected = False
 
-    def wait_for_x(self) -> None:
+    def wait_for_x(self, timeout: int = 900) -> None:
         """
         Wait until it is possible to connect to the X server.
         """
@@ -1165,14 +1169,14 @@ class Machine:
             return status == 0
 
         with self.nested("waiting for the X11 server"):
-            retry(check_x)
+            retry(check_x, timeout)
 
     def get_window_names(self) -> List[str]:
         return self.succeed(
             r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'"
         ).splitlines()
 
-    def wait_for_window(self, regexp: str) -> None:
+    def wait_for_window(self, regexp: str, timeout: int = 900) -> None:
         """
         Wait until an X11 window has appeared whose name matches the given
         regular expression, e.g., `wait_for_window("Terminal")`.
@@ -1190,7 +1194,7 @@ class Machine:
             return any(pattern.search(name) for name in names)
 
         with self.nested("waiting for a window to appear"):
-            retry(window_is_visible)
+            retry(window_is_visible, timeout)
 
     def sleep(self, secs: int) -> None:
         # We want to sleep in *guest* time, not *host* time.
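
A sketch of how the new `timeout` arguments can be used from a NixOS test (the test name, node configuration and commands are illustrative):

```nix
import ./make-test-python.nix ({ pkgs, ... }: {
  name = "timeout-example";
  nodes.machine = { ... }: {
    services.nginx = {
      enable = true;
      virtualHosts.localhost = { };
    };
  };
  testScript = ''
    machine.wait_for_unit("nginx.service")
    machine.succeed("touch /tmp/ready")
    # fail after 60 seconds instead of the default 900
    machine.wait_for_file("/tmp/ready", timeout=60)
    machine.wait_for_open_port(80, timeout=120)
  '';
})
```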
diff --git a/nixos/lib/testing/driver.nix b/nixos/lib/testing/driver.nix
index 23574698c062..cc97ca72083f 100644
--- a/nixos/lib/testing/driver.nix
+++ b/nixos/lib/testing/driver.nix
@@ -175,7 +175,12 @@ in
   };
 
   config = {
-    _module.args.hostPkgs = config.hostPkgs;
+    _module.args = {
+      hostPkgs =
+        # Comment is in nixos/modules/misc/nixpkgs.nix
+        lib.mkOverride lib.modules.defaultOverridePriority
+          config.hostPkgs.__splicedPackages;
+    };
 
     driver = withChecks driver;
 
diff --git a/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix b/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix
new file mode 100644
index 000000000000..7b743d170bc6
--- /dev/null
+++ b/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix
@@ -0,0 +1,20 @@
+# Edit this configuration file to define what should be installed on
+# your system.  Help is available in the configuration.nix(5) man page
+# and in the NixOS manual (accessible by running ‘nixos-help’).
+
+{ config, pkgs, lib, ... }:
+
+{
+  imports =
+    [
+      # Include the default lxd configuration.
+      ../../../modules/virtualisation/lxc-container.nix
+      # Include the container-specific autogenerated configuration.
+      ./lxd.nix
+    ];
+
+  networking.useDHCP = false;
+  networking.interfaces.eth0.useDHCP = true;
+
+  system.stateVersion = "21.05"; # Did you read the comment?
+}
diff --git a/nixos/maintainers/scripts/lxd/lxd-image.nix b/nixos/maintainers/scripts/lxd/lxd-container-image.nix
index 07605c5c3120..3bd1320b2b68 100644
--- a/nixos/maintainers/scripts/lxd/lxd-image.nix
+++ b/nixos/maintainers/scripts/lxd/lxd-container-image.nix
@@ -1,4 +1,4 @@
-{ lib, config, pkgs, ... }:
+{ lib, pkgs, ... }:
 
 {
   imports = [
@@ -16,8 +16,8 @@
   system.activationScripts.config = ''
     if [ ! -e /etc/nixos/configuration.nix ]; then
       mkdir -p /etc/nixos
-      cat ${./lxd-image-inner.nix} > /etc/nixos/configuration.nix
-      sed 's|../../../modules/virtualisation/lxc-container.nix|<nixpkgs/nixos/modules/virtualisation/lxc-container.nix>|g' -i /etc/nixos/configuration.nix
+      cat ${./lxd-container-image-inner.nix} > /etc/nixos/configuration.nix
+      ${lib.getExe pkgs.gnused} 's|../../../modules/virtualisation/lxc-container.nix|<nixpkgs/nixos/modules/virtualisation/lxc-container.nix>|g' -i /etc/nixos/configuration.nix
     fi
   '';
 
diff --git a/nixos/maintainers/scripts/lxd/lxd-image-inner.nix b/nixos/maintainers/scripts/lxd/lxd-image-inner.nix
deleted file mode 100644
index c1a9b1aacd18..000000000000
--- a/nixos/maintainers/scripts/lxd/lxd-image-inner.nix
+++ /dev/null
@@ -1,95 +0,0 @@
-# Edit this configuration file to define what should be installed on
-# your system.  Help is available in the configuration.nix(5) man page
-# and in the NixOS manual (accessible by running ‘nixos-help’).
-
-{ config, pkgs, lib, ... }:
-
-{
-  imports =
-    [ # Include the default lxd configuration.
-      ../../../modules/virtualisation/lxc-container.nix
-      # Include the container-specific autogenerated configuration.
-      ./lxd.nix
-    ];
-
-  # networking.hostName = mkForce "nixos"; # Overwrite the hostname.
-  # networking.wireless.enable = true;  # Enables wireless support via wpa_supplicant.
-
-  # Set your time zone.
-  # time.timeZone = "Europe/Amsterdam";
-
-  # The global useDHCP flag is deprecated, therefore explicitly set to false here.
-  # Per-interface useDHCP will be mandatory in the future, so this generated config
-  # replicates the default behaviour.
-  networking.useDHCP = false;
-  networking.interfaces.eth0.useDHCP = true;
-
-  # Configure network proxy if necessary
-  # networking.proxy.default = "http://user:password@proxy:port/";
-  # networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
-
-  # Select internationalisation properties.
-  # i18n.defaultLocale = "en_US.UTF-8";
-  # console = {
-  #   font = "Lat2-Terminus16";
-  #   keyMap = "us";
-  # };
-
-  # Enable the X11 windowing system.
-  # services.xserver.enable = true;
-
-  # Configure keymap in X11
-  # services.xserver.layout = "us";
-  # services.xserver.xkbOptions = "eurosign:e";
-
-  # Enable CUPS to print documents.
-  # services.printing.enable = true;
-
-  # Enable sound.
-  # sound.enable = true;
-  # hardware.pulseaudio.enable = true;
-
-  # Enable touchpad support (enabled default in most desktopManager).
-  # services.xserver.libinput.enable = true;
-
-  # Define a user account. Don't forget to set a password with ‘passwd’.
-  # users.users.alice = {
-  #   isNormalUser = true;
-  #   extraGroups = [ "wheel" ]; # Enable ‘sudo’ for the user.
-  # };
-
-  # List packages installed in system profile. To search, run:
-  # $ nix search wget
-  # environment.systemPackages = with pkgs; [
-  #   vim # Do not forget to add an editor to edit configuration.nix! The Nano editor is also installed by default.
-  #   wget
-  #   firefox
-  # ];
-
-  # Some programs need SUID wrappers, can be configured further or are
-  # started in user sessions.
-  # programs.mtr.enable = true;
-  # programs.gnupg.agent = {
-  #   enable = true;
-  #   enableSSHSupport = true;
-  # };
-
-  # List services that you want to enable:
-
-  # Enable the OpenSSH daemon.
-  # services.openssh.enable = true;
-
-  # Open ports in the firewall.
-  # networking.firewall.allowedTCPPorts = [ ... ];
-  # networking.firewall.allowedUDPPorts = [ ... ];
-  # Or disable the firewall altogether.
-  # networking.firewall.enable = false;
-
-  # This value determines the NixOS release from which the default
-  # settings for stateful data, like file locations and database versions
-  # on your system were taken. It’s perfectly fine and recommended to leave
-  # this value at the release version of the first install of this system.
-  # Before changing this value read the documentation for this option
-  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
-  system.stateVersion = "21.05"; # Did you read the comment?
-}
diff --git a/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix b/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix
new file mode 100644
index 000000000000..a8f2c63ac5c6
--- /dev/null
+++ b/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix
@@ -0,0 +1,20 @@
+# Edit this configuration file to define what should be installed on
+# your system.  Help is available in the configuration.nix(5) man page
+# and in the NixOS manual (accessible by running ‘nixos-help’).
+
+{ config, pkgs, lib, ... }:
+
+{
+  imports =
+    [
+      # Include the default lxd configuration.
+      ../../../modules/virtualisation/lxd-virtual-machine.nix
+      # Include the instance-specific autogenerated configuration.
+      ./lxd.nix
+    ];
+
+  networking.useDHCP = false;
+  networking.interfaces.eth0.useDHCP = true;
+
+  system.stateVersion = "23.05"; # Did you read the comment?
+}
diff --git a/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix b/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix
new file mode 100644
index 000000000000..eb0d9217d402
--- /dev/null
+++ b/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix
@@ -0,0 +1,27 @@
+{ lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../../modules/virtualisation/lxd-virtual-machine.nix
+  ];
+
+  virtualisation.lxc.templates.nix = {
+    enable = true;
+    target = "/etc/nixos/lxd.nix";
+    template = ./nix.tpl;
+    when = ["create" "copy"];
+  };
+
+  # copy the config for nixos-rebuild
+  system.activationScripts.config = ''
+    if [ ! -e /etc/nixos/configuration.nix ]; then
+      mkdir -p /etc/nixos
+      cat ${./lxd-virtual-machine-image-inner.nix} > /etc/nixos/configuration.nix
+      ${lib.getExe pkgs.gnused} 's|../../../modules/virtualisation/lxd-virtual-machine.nix|<nixpkgs/nixos/modules/virtualisation/lxd-virtual-machine.nix>|g' -i /etc/nixos/configuration.nix
+    fi
+  '';
+
+  # Network
+  networking.useDHCP = false;
+  networking.interfaces.enp5s0.useDHCP = true;
+}
diff --git a/nixos/maintainers/scripts/oci/create-image.sh b/nixos/maintainers/scripts/oci/create-image.sh
new file mode 100755
index 000000000000..0d7332a0b272
--- /dev/null
+++ b/nixos/maintainers/scripts/oci/create-image.sh
@@ -0,0 +1,24 @@
+#! /usr/bin/env bash
+
+set -euo pipefail
+
+export NIX_PATH=nixpkgs=$(dirname $(readlink -f $0))/../../../..
+export NIXOS_CONFIG=$(dirname $(readlink -f $0))/../../../modules/virtualisation/oci-image.nix
+
+if (( $# < 1 )); then
+    (
+    echo "Usage: create-image.sh <architecture>"
+    echo
+    echo "Where <architecture> is one of:"
+    echo "  x86_64-linux"
+    echo "  aarch64-linux"
+    ) >&2
+    exit 1
+fi
+
+system="$1"; shift
+
+nix-build '<nixpkgs/nixos>' \
+    -A config.system.build.OCIImage \
+    --argstr system "$system" \
+    --option system-features kvm \
+    -o oci-image
diff --git a/nixos/maintainers/scripts/oci/upload-image.sh b/nixos/maintainers/scripts/oci/upload-image.sh
new file mode 100755
index 000000000000..e4870e94bf54
--- /dev/null
+++ b/nixos/maintainers/scripts/oci/upload-image.sh
@@ -0,0 +1,100 @@
+#! /usr/bin/env bash
+
+set -euo pipefail
+
+script_dir="$(dirname $(readlink -f $0))"
+nixpkgs_root="$script_dir/../../../.."
+export NIX_PATH="nixpkgs=$nixpkgs_root"
+
+cat - <<EOF
+This script will locally build a NixOS image and upload it as a Custom Image
+using oci-cli. Make sure that an API key for the tenancy administrator has been
+added to '~/.oci'.
+For more info about configuring oci-cli, please visit
+https://docs.cloud.oracle.com/iaas/Content/API/Concepts/apisigningkey.htm#Required_Keys_and_OCIDs
+
+EOF
+
+qcow="oci-image/nixos.qcow2"
+if [ ! -f "$qcow" ]; then
+    echo "OCI image $qcow does not exist"
+    echo "Building image with create-image.sh for 'x86_64-linux'"
+    "$script_dir/create-image.sh" x86_64-linux
+    [ -f "$qcow" ] || { echo "Build failed: image not present after build"; exit 1; }
+else
+    echo "Using prebuilt image $qcow"
+fi
+
+cli="$(
+  nix-build '<nixpkgs>' \
+    --no-out-link \
+    -A oci-cli
+)"
+
+PATH="$cli/bin:$PATH"
+bucket="_TEMP_NIXOS_IMAGES_$RANDOM"
+
+echo "Creating a temporary bucket"
+root_ocid="$(
+  oci iam compartment list \
+  --all \
+  --compartment-id-in-subtree true \
+  --access-level ACCESSIBLE \
+  --include-root \
+  --raw-output \
+  --query "data[?contains(\"id\",'tenancy')].id | [0]"
+)"
+bucket_ocid=$(
+  oci os bucket create \
+    -c "$root_ocid" \
+    --name "$bucket" \
+    --raw-output \
+    --query "data.id"
+)
+# Clean up bucket on script termination
+trap 'echo Removing temporary bucket; oci os bucket delete --force --name "$bucket"' INT TERM EXIT
+
+echo "Uploading image to temporary bucket"
+oci os object put -bn "$bucket" --file "$qcow"
+
+echo "Importing image as a Custom Image"
+bucket_ns="$(oci os ns get --query "data" --raw-output)"
+image_id="$(
+  oci compute image import from-object \
+    -c "$root_ocid" \
+    --namespace "$bucket_ns" \
+    --bucket-name "$bucket" \
+    --name nixos.qcow2 \
+    --operating-system NixOS \
+    --source-image-type QCOW2 \
+    --launch-mode PARAVIRTUALIZED \
+    --display-name NixOS \
+    --raw-output \
+    --query "data.id"
+)"
+
+cat - <<EOF
+Image created! Please mark all available shapes as compatible with this image by
+visiting the following link and by selecting the 'Edit Details' button on:
+https://cloud.oracle.com/compute/images/$image_id
+EOF
+
+# Workaround until https://github.com/oracle/oci-cli/issues/399 is addressed
+echo "Sleeping for 15 minutes before cleaning up files in the temporary bucket"
+sleep $((15 * 60))
+
+echo "Deleting image from bucket"
+par_id="$(
+  oci os preauth-request list \
+    --bucket-name "$bucket" \
+    --raw-output \
+    --query "data[0].id"
+)"
+
+if [[ -n $par_id ]]; then
+  oci os preauth-request delete \
+    --bucket-name "$bucket" \
+    --par-id "$par_id"
+fi
+
+oci os object delete -bn "$bucket" --object-name nixos.qcow2 --force
diff --git a/nixos/modules/config/fonts/packages.nix b/nixos/modules/config/fonts/packages.nix
index 46907d5411ca..37b705ecb345 100644
--- a/nixos/modules/config/fonts/packages.nix
+++ b/nixos/modules/config/fonts/packages.nix
@@ -37,7 +37,7 @@ in
       gyre-fonts # TrueType substitutes for standard PostScript fonts
       liberation_ttf
       unifont
-      noto-fonts-emoji
+      noto-fonts-color-emoji
     ]);
   };
 }
diff --git a/nixos/modules/config/terminfo.nix b/nixos/modules/config/terminfo.nix
index 82f9ae48372a..d1dbc4e0d059 100644
--- a/nixos/modules/config/terminfo.nix
+++ b/nixos/modules/config/terminfo.nix
@@ -6,26 +6,48 @@ with lib;
 
 {
 
-  options.environment.enableAllTerminfo = with lib; mkOption {
-    default = false;
-    type = types.bool;
-    description = lib.mdDoc ''
-      Whether to install all terminfo outputs
-    '';
+  options = with lib; {
+    environment.enableAllTerminfo = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to install all terminfo outputs
+      '';
+    };
+
+    security.sudo.keepTerminfo = mkOption {
+      default = config.security.sudo.package.pname != "sudo-rs";
+      defaultText = literalMD ''
+        `true` unless using `sudo-rs`
+      '';
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to preserve the `TERMINFO` and `TERMINFO_DIRS`
+        environment variables, for `root` and the `wheel` group.
+      '';
+    };
   };
 
   config = {
 
-    # can be generated with: filter (drv: (builtins.tryEval (drv ? terminfo)).value) (attrValues pkgs)
+    # can be generated with:
+    # attrNames (filterAttrs
+    #  (_: drv: (builtins.tryEval (isDerivation drv && drv ? terminfo)).value)
+    #  pkgs)
     environment.systemPackages = mkIf config.environment.enableAllTerminfo (map (x: x.terminfo) (with pkgs; [
       alacritty
+      contour
       foot
       kitty
       mtm
+      rio
       rxvt-unicode-unwrapped
       rxvt-unicode-unwrapped-emoji
+      st
       termite
+      tmux
       wezterm
+      yaft
     ]));
 
     environment.pathsToLink = [
@@ -46,7 +68,7 @@ with lib;
       export TERM=$TERM
     '';
 
-    security.sudo.extraConfig = ''
+    security.sudo.extraConfig = mkIf config.security.sudo.keepTerminfo ''
 
       # Keep terminfo database for root and %wheel.
       Defaults:root,%wheel env_keep+=TERMINFO_DIRS
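
For reference, a minimal sketch of how the terminfo-related options above could be set in a host configuration (both values shown are illustrative choices, not new defaults):

    {
      # Install the terminfo outputs of all supported terminal emulators.
      environment.enableAllTerminfo = true;
      # Explicitly opt out of preserving TERMINFO/TERMINFO_DIRS through sudo.
      security.sudo.keepTerminfo = false;
    }
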
diff --git a/nixos/modules/config/update-users-groups.pl b/nixos/modules/config/update-users-groups.pl
index 75c343523e27..7c6851473f42 100644
--- a/nixos/modules/config/update-users-groups.pl
+++ b/nixos/modules/config/update-users-groups.pl
@@ -4,6 +4,7 @@ use File::Path qw(make_path);
 use File::Slurp;
 use Getopt::Long;
 use JSON;
+use Time::Piece;
 
 # Keep track of deleted uids and gids.
 my $uidMapFile = "/var/lib/nixos/uid-map";
@@ -22,6 +23,13 @@ sub updateFile {
     write_file($path, { atomic => 1, binmode => ':utf8', perms => $perms // 0644 }, $contents) or die;
 }
 
+# Converts an ISO date to number of days since 1970-01-01
+sub dateToDays {
+    my ($date) = @_;
+    my $time = Time::Piece->strptime($date, "%Y-%m-%d");
+    return $time->epoch / 60 / 60 / 24;
+}
+
 sub nscdInvalidate {
     system("nscd", "--invalidate", $_[0]) unless $is_dry;
 }
@@ -231,12 +239,12 @@ foreach my $u (@{$spec->{users}}) {
         chmod oct($u->{homeMode}), $u->{home};
     }
 
-    if (defined $u->{passwordFile}) {
-        if (-e $u->{passwordFile}) {
-            $u->{hashedPassword} = read_file($u->{passwordFile});
+    if (defined $u->{hashedPasswordFile}) {
+        if (-e $u->{hashedPasswordFile}) {
+            $u->{hashedPassword} = read_file($u->{hashedPasswordFile});
             chomp $u->{hashedPassword};
         } else {
-            warn "warning: password file ‘$u->{passwordFile}’ does not exist\n";
+            warn "warning: password file ‘$u->{hashedPasswordFile}’ does not exist\n";
         }
     } elsif (defined $u->{password}) {
         $u->{hashedPassword} = hashPassword($u->{password});
@@ -285,22 +293,26 @@ my %shadowSeen;
 
 foreach my $line (-f "/etc/shadow" ? read_file("/etc/shadow", { binmode => ":utf8" }) : ()) {
     chomp $line;
-    my ($name, $hashedPassword, @rest) = split(':', $line, -9);
-    my $u = $usersOut{$name};;
+    # struct name copied from `man 3 shadow`
+    my ($sp_namp, $sp_pwdp, $sp_lstch, $sp_min, $sp_max, $sp_warn, $sp_inact, $sp_expire, $sp_flag) = split(':', $line, -9);
+    my $u = $usersOut{$sp_namp};
     next if !defined $u;
-    $hashedPassword = "!" if !$spec->{mutableUsers};
-    $hashedPassword = $u->{hashedPassword} if defined $u->{hashedPassword} && !$spec->{mutableUsers}; # FIXME
-    chomp $hashedPassword;
-    push @shadowNew, join(":", $name, $hashedPassword, @rest) . "\n";
-    $shadowSeen{$name} = 1;
+    $sp_pwdp = "!" if !$spec->{mutableUsers};
+    $sp_pwdp = $u->{hashedPassword} if defined $u->{hashedPassword} && !$spec->{mutableUsers}; # FIXME
+    $sp_expire = dateToDays($u->{expires}) if defined $u->{expires};
+    chomp $sp_pwdp;
+    push @shadowNew, join(":", $sp_namp, $sp_pwdp, $sp_lstch, $sp_min, $sp_max, $sp_warn, $sp_inact, $sp_expire, $sp_flag) . "\n";
+    $shadowSeen{$sp_namp} = 1;
 }
 
 foreach my $u (values %usersOut) {
     next if defined $shadowSeen{$u->{name}};
     my $hashedPassword = "!";
     $hashedPassword = $u->{hashedPassword} if defined $u->{hashedPassword};
+    my $expires = "";
+    $expires = dateToDays($u->{expires}) if defined $u->{expires};
     # FIXME: set correct value for sp_lstchg.
-    push @shadowNew, join(":", $u->{name}, $hashedPassword, "1::::::") . "\n";
+    push @shadowNew, join(":", $u->{name}, $hashedPassword, "1::::", $expires, "") . "\n";
 }
 
 updateFile("/etc/shadow", \@shadowNew, 0640);
diff --git a/nixos/modules/config/users-groups.nix b/nixos/modules/config/users-groups.nix
index 4c9e286ea5fd..785084209b00 100644
--- a/nixos/modules/config/users-groups.nix
+++ b/nixos/modules/config/users-groups.nix
@@ -18,11 +18,11 @@ let
 
   passwordDescription = ''
     The options {option}`hashedPassword`,
-    {option}`password` and {option}`passwordFile`
+    {option}`password` and {option}`hashedPasswordFile`
     control what password is set for the user.
     {option}`hashedPassword` overrides both
-    {option}`password` and {option}`passwordFile`.
-    {option}`password` overrides {option}`passwordFile`.
+    {option}`password` and {option}`hashedPasswordFile`.
+    {option}`password` overrides {option}`hashedPasswordFile`.
     If none of these three options are set, no password is assigned to
     the user, and the user will not be able to do password logins.
     If the option {option}`users.mutableUsers` is true, the
@@ -250,18 +250,26 @@ let
         '';
       };
 
-      passwordFile = mkOption {
+      hashedPasswordFile = mkOption {
         type = with types; nullOr str;
-        default = null;
+        default = cfg.users.${name}.passwordFile;
+        defaultText = literalExpression "null";
         description = lib.mdDoc ''
-          The full path to a file that contains the user's password. The password
-          file is read on each system activation. The file should contain
-          exactly one line, which should be the password in an encrypted form
-          that is suitable for the `chpasswd -e` command.
+          The full path to a file that contains the hash of the user's
+          password. The password file is read on each system activation. The
+          file should contain exactly one line, which should be the password in
+          an encrypted form that is suitable for the `chpasswd -e` command.
           ${passwordDescription}
         '';
       };
 
+      passwordFile = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        visible = false;
+        description = lib.mdDoc "Deprecated alias of hashedPasswordFile";
+      };
+
       initialHashedPassword = mkOption {
         type = with types; nullOr (passwdEntry str);
         default = null;
@@ -311,6 +319,17 @@ let
         '';
       };
 
+      expires = mkOption {
+        type = types.nullOr (types.strMatching "[[:digit:]]{4}-[[:digit:]]{2}-[[:digit:]]{2}");
+        default = null;
+        description = lib.mdDoc ''
+          Set the date on which the user's account will no longer be
+          accessible. The date is expressed in the format YYYY-MM-DD, or null
+          to disable the expiry.
+          A user whose account is locked must contact the system
+          administrator before being able to use the system again.
+        '';
+      };
     };
 
     config = mkMerge
@@ -436,9 +455,9 @@ let
     users = mapAttrsToList (_: u:
       { inherit (u)
           name uid group description home homeMode createHome isSystemUser
-          password passwordFile hashedPassword
+          password hashedPasswordFile hashedPassword
           autoSubUidGidRange subUidRanges subGidRanges
-          initialPassword initialHashedPassword;
+          initialPassword initialHashedPassword expires;
         shell = utils.toShellPath u.shell;
       }) cfg.users;
     groups = attrValues cfg.groups;
@@ -681,6 +700,7 @@ in {
 
     environment.profiles = [
       "$HOME/.nix-profile"
+      "\${XDG_STATE_HOME:-$HOME/.local/state}/nix/profile"
       "/etc/profiles/per-user/$USER"
     ];
 
@@ -745,7 +765,7 @@ in {
             &&
             (allowsLogin cfg.hashedPassword
              || cfg.password != null
-             || cfg.passwordFile != null
+             || cfg.hashedPasswordFile != null
              || cfg.openssh.authorizedKeys.keys != []
              || cfg.openssh.authorizedKeys.keyFiles != [])
           ) cfg.users ++ [
@@ -834,9 +854,13 @@ in {
           The password hash of user "${user.name}" may be invalid. You must set a
           valid hash or the user will be locked out of their account. Please
           check the value of option `users.users."${user.name}".hashedPassword`.''
-        else null
-      ));
-
+        else null)
+        ++ flip mapAttrsToList cfg.users (name: user:
+          if user.passwordFile != null then
+            ''The option `users.users."${name}".passwordFile' has been renamed '' +
+            ''to `users.users."${name}".hashedPasswordFile'.''
+          else null)
+      );
   };
 
 }
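
For reference, a minimal sketch of the renamed and newly added user options; the user name, file path and date below are placeholders:

    {
      users.users.alice = {
        isNormalUser = true;
        # Replaces the deprecated `passwordFile` option; the file must contain a
        # single line with a hash suitable for `chpasswd -e`.
        hashedPasswordFile = "/etc/secrets/alice.passwd";
        # Lock the account starting on this date (YYYY-MM-DD).
        expires = "2030-01-01";
      };
    }
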
diff --git a/nixos/modules/hardware/decklink.nix b/nixos/modules/hardware/decklink.nix
new file mode 100644
index 000000000000..d179e1d7634f
--- /dev/null
+++ b/nixos/modules/hardware/decklink.nix
@@ -0,0 +1,16 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.decklink;
+  kernelPackages = config.boot.kernelPackages;
+in
+{
+  options.hardware.decklink.enable = lib.mkEnableOption "hardware support for the Blackmagic Design Decklink audio/video interfaces";
+
+  config = lib.mkIf cfg.enable {
+    boot.kernelModules = [ "blackmagic" "blackmagic-io" "snd_blackmagic-io" ];
+    boot.extraModulePackages = [ kernelPackages.decklink ];
+    systemd.packages = [ pkgs.blackmagic-desktop-video ];
+    systemd.services.DesktopVideoHelper.wantedBy = [ "multi-user.target" ];
+  };
+}
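
A minimal sketch of enabling the new DeckLink module; nothing beyond the enable switch is required:

    {
      hardware.decklink.enable = true;
    }
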
diff --git a/nixos/modules/hardware/glasgow.nix b/nixos/modules/hardware/glasgow.nix
new file mode 100644
index 000000000000..f8ebb772c47b
--- /dev/null
+++ b/nixos/modules/hardware/glasgow.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.glasgow;
+
+in
+{
+  options.hardware.glasgow = {
+    enable = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables Glasgow udev rules and ensures 'plugdev' group exists.
+        This is a prerequisite to using Glasgow without being root.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.udev.packages = [ pkgs.glasgow ];
+    users.groups.plugdev = { };
+  };
+}
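
A minimal sketch of using the new Glasgow module; the user name is a placeholder:

    {
      hardware.glasgow.enable = true;
      # Non-root access works via membership in the plugdev group created above.
      users.users.alice.extraGroups = [ "plugdev" ];
    }
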
diff --git a/nixos/modules/hardware/infiniband.nix b/nixos/modules/hardware/infiniband.nix
new file mode 100644
index 000000000000..962883fa7972
--- /dev/null
+++ b/nixos/modules/hardware/infiniband.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.infiniband;
+  opensm-services = {
+    "opensm@" = {
+      enable = true;
+      description = "Starts OpenSM Infiniband fabric Subnet Managers";
+      before = [ "network.target"];
+      unitConfig = {
+        ConditionPathExists = "/sys/class/infiniband_mad/abi_version";
+      };
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.opensm}/bin/opensm --guid %I --log_file /var/log/opensm.%I.log";
+      };
+    };
+  } // (builtins.listToAttrs (map (guid: {
+    name = "opensm@${guid}";
+    value = {
+      enable = true;
+      wantedBy = [ "machines.target" ];
+      overrideStrategy = "asDropin";
+    };
+  } ) cfg.guids));
+
+in
+
+{
+  options.hardware.infiniband = {
+    enable = mkEnableOption "Infiniband support";
+    guids = mkOption {
+      type = with types; listOf str;
+      default = [];
+      example = [ "0xe8ebd30000eee2e1" ];
+      description = lib.mdDoc ''
+        A list of InfiniBand port GUIDs on the system. These can be discovered with `ibstat -p`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.initrd.kernelModules = [
+      "mlx5_core" "mlx5_ib" "ib_cm"
+      "rdma_cm" "rdma_ucm" "rpcrdma"
+      "ib_ipoib" "ib_isert" "ib_umad" "ib_uverbs"
+    ];
+    # rdma-core provides ibstat, mstflint provides mstconfig (which can be needed
+    # for changing link configurations), and qperf is useful for verifying link speeds
+    environment.systemPackages = with pkgs; [
+      rdma-core mstflint qperf
+    ];
+    systemd.services = opensm-services;
+  };
+}
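
A minimal sketch of the new InfiniBand module; the GUID is the option's example value and should be replaced with the output of `ibstat -p` on the target host:

    {
      hardware.infiniband = {
        enable = true;
        guids = [ "0xe8ebd30000eee2e1" ];
      };
    }
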
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
index 67c3afcf320a..a40713ac25c7 100644
--- a/nixos/modules/hardware/video/nvidia.nix
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -4,8 +4,9 @@
   pkgs,
   ...
 }: let
+  nvidiaEnabled = (lib.elem "nvidia" config.services.xserver.videoDrivers);
   nvidia_x11 =
-    if (lib.elem "nvidia" config.services.xserver.videoDrivers)
+    if nvidiaEnabled || cfg.datacenter.enable
     then cfg.package
     else null;
 
@@ -18,9 +19,64 @@
   primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
   busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
   ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
+  settingsFormat = pkgs.formats.keyValue {};
 in {
   options = {
     hardware.nvidia = {
+      datacenter.enable = lib.mkEnableOption (lib.mdDoc ''
+        Data Center drivers for NVIDIA cards on a NVLink topology.
+      '');
+      datacenter.settings = lib.mkOption {
+        type = settingsFormat.type;
+        default = {
+          LOG_LEVEL=4;
+          LOG_FILE_NAME="/var/log/fabricmanager.log";
+          LOG_APPEND_TO_LOG=1;
+          LOG_FILE_MAX_SIZE=1024;
+          LOG_USE_SYSLOG=0;
+          DAEMONIZE=1;
+          BIND_INTERFACE_IP="127.0.0.1";
+          STARTING_TCP_PORT=16000;
+          FABRIC_MODE=0;
+          FABRIC_MODE_RESTART=0;
+          STATE_FILE_NAME="/var/tmp/fabricmanager.state";
+          FM_CMD_BIND_INTERFACE="127.0.0.1";
+          FM_CMD_PORT_NUMBER=6666;
+          FM_STAY_RESIDENT_ON_FAILURES=0;
+          ACCESS_LINK_FAILURE_MODE=0;
+          TRUNK_LINK_FAILURE_MODE=0;
+          NVSWITCH_FAILURE_MODE=0;
+          ABORT_CUDA_JOBS_ON_FM_EXIT=1;
+          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+        };
+        defaultText = lib.literalExpression ''
+        {
+          LOG_LEVEL=4;
+          LOG_FILE_NAME="/var/log/fabricmanager.log";
+          LOG_APPEND_TO_LOG=1;
+          LOG_FILE_MAX_SIZE=1024;
+          LOG_USE_SYSLOG=0;
+          DAEMONIZE=1;
+          BIND_INTERFACE_IP="127.0.0.1";
+          STARTING_TCP_PORT=16000;
+          FABRIC_MODE=0;
+          FABRIC_MODE_RESTART=0;
+          STATE_FILE_NAME="/var/tmp/fabricmanager.state";
+          FM_CMD_BIND_INTERFACE="127.0.0.1";
+          FM_CMD_PORT_NUMBER=6666;
+          FM_STAY_RESIDENT_ON_FAILURES=0;
+          ACCESS_LINK_FAILURE_MODE=0;
+          TRUNK_LINK_FAILURE_MODE=0;
+          NVSWITCH_FAILURE_MODE=0;
+          ABORT_CUDA_JOBS_ON_FM_EXIT=1;
+          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+        }
+        '';
+        description = lib.mdDoc ''
+          Additional configuration options for fabricmanager.
+        '';
+      };
+
       powerManagement.enable = lib.mkEnableOption (lib.mdDoc ''
         experimental power management through systemd. For more information, see
         the NVIDIA docs, on Chapter 21. Configuring Power Management Support.
@@ -167,9 +223,15 @@ in {
         It also drastically increases the time the driver needs to clock down after load.
       '');
 
-      package = lib.mkPackageOptionMD config.boot.kernelPackages.nvidiaPackages "nvidia_x11" {
-        default = "stable";
+      package = lib.mkOption {
+        default = config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
+        defaultText = lib.literalExpression ''
+          config.boot.kernelPackages.nvidiaPackages."\$\{if cfg.datacenter.enable then "dc" else "stable"}"
+        '';
         example = lib.mdDoc "config.boot.kernelPackages.nvidiaPackages.legacy_470";
+        description = lib.mdDoc ''
+          The NVIDIA driver package to use.
+        '';
       };
 
       open = lib.mkEnableOption (lib.mdDoc ''
@@ -188,8 +250,46 @@ in {
       then pCfg.intelBusId
       else pCfg.amdgpuBusId;
   in
-    lib.mkIf (nvidia_x11 != null) {
-      assertions = [
+    lib.mkIf (nvidia_x11 != null) (lib.mkMerge [
+      # Common
+      ({
+        assertions = [
+          {
+            assertion = !(nvidiaEnabled && cfg.datacenter.enable);
+            message = "You cannot configure both X11 and Data Center drivers at the same time.";
+          }
+        ];
+        boot = {
+          blacklistedKernelModules = ["nouveau" "nvidiafb"];
+          kernelModules = [ "nvidia-uvm" ];
+        };
+        systemd.tmpfiles.rules =
+          lib.optional config.virtualisation.docker.enableNvidia
+            "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin";
+        services.udev.extraRules =
+        ''
+          # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
+          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 255'"
+          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \  -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) $${i}; done'"
+          KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 254'"
+          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
+          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 1'"
+        '';
+        hardware.opengl = {
+          extraPackages = [
+            nvidia_x11.out
+          ];
+          extraPackages32 = [
+            nvidia_x11.lib32
+          ];
+        };
+        environment.systemPackages = [
+          nvidia_x11.bin
+        ];
+      })
+      # X11
+      (lib.mkIf nvidiaEnabled {
+        assertions = [
         {
           assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
           message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";
@@ -248,227 +348,207 @@ in {
         {
           assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
           message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01";
-        }
-      ];
-
-      # If Optimus/PRIME is enabled, we:
-      # - Specify the configured NVIDIA GPU bus ID in the Device section for the
-      #   "nvidia" driver.
-      # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
-      #   "nvidia" driver, in order to allow the X server to start without any outputs.
-      # - Add a separate Device section for the Intel GPU, using the "modesetting"
-      #   driver and with the configured BusID.
-      # - OR add a separate Device section for the AMD APU, using the "amdgpu"
-      #   driver and with the configures BusID.
-      # - Reference that Device section from the ServerLayout section as an inactive
-      #   device.
-      # - Configure the display manager to run specific `xrandr` commands which will
-      #   configure/enable displays connected to the Intel iGPU / AMD APU.
-
-      # reverse sync implies offloading
-      hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
-
-      services.xserver.drivers =
-        lib.optional primeEnabled {
-          name = igpuDriver;
-          display = offloadCfg.enable;
-          modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
-          deviceSection =
-            ''
-              BusID "${igpuBusId}"
-            ''
-            + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
-              Option "AccelMethod" "none"
-            '';
-        }
-        ++ lib.singleton {
-          name = "nvidia";
-          modules = [nvidia_x11.bin];
-          display = !offloadCfg.enable;
-          deviceSection =
-            lib.optionalString primeEnabled
-            ''
-              BusID "${pCfg.nvidiaBusId}"
-            ''
-            + lib.optionalString pCfg.allowExternalGpu ''
-              Option "AllowExternalGpus"
-            '';
-          screenSection =
-            ''
-              Option "RandRRotation" "on"
-            ''
-            + lib.optionalString syncCfg.enable ''
-              Option "AllowEmptyInitialConfiguration"
-            ''
-            + lib.optionalString cfg.forceFullCompositionPipeline ''
-              Option         "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
-              Option         "AllowIndirectGLXProtocol" "off"
-              Option         "TripleBuffer" "on"
-            '';
-        };
-
-      services.xserver.serverLayoutSection =
-        lib.optionalString syncCfg.enable ''
-          Inactive "Device-${igpuDriver}[0]"
-        ''
-        + lib.optionalString reverseSyncCfg.enable ''
-          Inactive "Device-nvidia[0]"
-        ''
-        + lib.optionalString offloadCfg.enable ''
-          Option "AllowNVIDIAGPUScreens"
-        '';
-
-      services.xserver.displayManager.setupCommands = let
-        gpuProviderName =
-          if igpuDriver == "amdgpu"
-          then
-            # find the name of the provider if amdgpu
-            "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
-          else igpuDriver;
-        providerCmdParams =
-          if syncCfg.enable
-          then "\"${gpuProviderName}\" NVIDIA-0"
-          else "NVIDIA-G0 \"${gpuProviderName}\"";
-      in
-        lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
-          # Added by nvidia configuration module for Optimus/PRIME.
-          ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
-          ${lib.getExe pkgs.xorg.xrandr} --auto
-        '';
-
-      environment.etc = {
-        "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};
+        }];
+
+        # If Optimus/PRIME is enabled, we:
+        # - Specify the configured NVIDIA GPU bus ID in the Device section for the
+        #   "nvidia" driver.
+        # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
+        #   "nvidia" driver, in order to allow the X server to start without any outputs.
+        # - Add a separate Device section for the Intel GPU, using the "modesetting"
+        #   driver and with the configured BusID.
+        # - OR add a separate Device section for the AMD APU, using the "amdgpu"
+        #   driver and with the configured BusID.
+        # - Reference that Device section from the ServerLayout section as an inactive
+        #   device.
+        # - Configure the display manager to run specific `xrandr` commands which will
+        #   configure/enable displays connected to the Intel iGPU / AMD APU.
+
+        # reverse sync implies offloading
+        hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
+
+        services.xserver.drivers =
+          lib.optional primeEnabled {
+            name = igpuDriver;
+            display = offloadCfg.enable;
+            modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
+            deviceSection =
+              ''
+                BusID "${igpuBusId}"
+              ''
+              + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
+                Option "AccelMethod" "none"
+              '';
+          }
+          ++ lib.singleton {
+            name = "nvidia";
+            modules = [nvidia_x11.bin];
+            display = !offloadCfg.enable;
+            deviceSection =
+              lib.optionalString primeEnabled
+              ''
+                BusID "${pCfg.nvidiaBusId}"
+              ''
+              + lib.optionalString pCfg.allowExternalGpu ''
+                Option "AllowExternalGpus"
+              '';
+            screenSection =
+              ''
+                Option "RandRRotation" "on"
+              ''
+              + lib.optionalString syncCfg.enable ''
+                Option "AllowEmptyInitialConfiguration"
+              ''
+              + lib.optionalString cfg.forceFullCompositionPipeline ''
+                Option         "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
+                Option         "AllowIndirectGLXProtocol" "off"
+                Option         "TripleBuffer" "on"
+              '';
+          };
 
-        # 'nvidia_x11' installs it's files to /run/opengl-driver/...
-        "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
-      };
+        services.xserver.serverLayoutSection =
+          lib.optionalString syncCfg.enable ''
+            Inactive "Device-${igpuDriver}[0]"
+          ''
+          + lib.optionalString reverseSyncCfg.enable ''
+            Inactive "Device-nvidia[0]"
+          ''
+          + lib.optionalString offloadCfg.enable ''
+            Option "AllowNVIDIAGPUScreens"
+          '';
+
+        services.xserver.displayManager.setupCommands = let
+          gpuProviderName =
+            if igpuDriver == "amdgpu"
+            then
+              # find the name of the provider if amdgpu
+              "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
+            else igpuDriver;
+          providerCmdParams =
+            if syncCfg.enable
+            then "\"${gpuProviderName}\" NVIDIA-0"
+            else "NVIDIA-G0 \"${gpuProviderName}\"";
+        in
+          lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
+            # Added by nvidia configuration module for Optimus/PRIME.
+            ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
+            ${lib.getExe pkgs.xorg.xrandr} --auto
+          '';
+
+        environment.etc = {
+          "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};
+
+          # 'nvidia_x11' installs it's files to /run/opengl-driver/...
+          "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
+        };
 
-      hardware.opengl = {
-        extraPackages = [
-          nvidia_x11.out
-          pkgs.nvidia-vaapi-driver
-        ];
-        extraPackages32 = [
-          nvidia_x11.lib32
-          pkgs.pkgsi686Linux.nvidia-vaapi-driver
-        ];
-      };
-      environment.systemPackages =
-        [nvidia_x11.bin]
-        ++ lib.optional cfg.nvidiaSettings nvidia_x11.settings
-        ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
-        ++ lib.optional offloadCfg.enableOffloadCmd
-        (pkgs.writeShellScriptBin "nvidia-offload" ''
-          export __NV_PRIME_RENDER_OFFLOAD=1
-          export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
-          export __GLX_VENDOR_LIBRARY_NAME=nvidia
-          export __VK_LAYER_NV_optimus=NVIDIA_only
-          exec "$@"
-        '');
-
-      systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
-
-      systemd.services = let
-        nvidiaService = state: {
-          description = "NVIDIA system ${state} actions";
-          path = [pkgs.kbd];
-          serviceConfig = {
-            Type = "oneshot";
-            ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
-          };
-          before = ["systemd-${state}.service"];
-          requiredBy = ["systemd-${state}.service"];
+        hardware.opengl = {
+          extraPackages = [
+            pkgs.nvidia-vaapi-driver
+          ];
+          extraPackages32 = [
+            pkgs.pkgsi686Linux.nvidia-vaapi-driver
+          ];
         };
-      in
-        lib.mkMerge [
-          (lib.mkIf cfg.powerManagement.enable {
-            nvidia-suspend = nvidiaService "suspend";
-            nvidia-hibernate = nvidiaService "hibernate";
-            nvidia-resume =
-              (nvidiaService "resume")
-              // {
-                before = [];
-                after = ["systemd-suspend.service" "systemd-hibernate.service"];
-                requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
-              };
-          })
-          (lib.mkIf cfg.nvidiaPersistenced {
-            "nvidia-persistenced" = {
-              description = "NVIDIA Persistence Daemon";
-              wantedBy = ["multi-user.target"];
-              serviceConfig = {
-                Type = "forking";
-                Restart = "always";
-                PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
-                ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
-                ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
-              };
+        environment.systemPackages =
+          lib.optional cfg.nvidiaSettings nvidia_x11.settings
+          ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
+          ++ lib.optional offloadCfg.enableOffloadCmd
+          (pkgs.writeShellScriptBin "nvidia-offload" ''
+            export __NV_PRIME_RENDER_OFFLOAD=1
+            export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
+            export __GLX_VENDOR_LIBRARY_NAME=nvidia
+            export __VK_LAYER_NV_optimus=NVIDIA_only
+            exec "$@"
+          '');
+
+        systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
+
+        systemd.services = let
+          nvidiaService = state: {
+            description = "NVIDIA system ${state} actions";
+            path = [pkgs.kbd];
+            serviceConfig = {
+              Type = "oneshot";
+              ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
             };
-          })
-          (lib.mkIf cfg.dynamicBoost.enable {
-            "nvidia-powerd" = {
-              description = "nvidia-powerd service";
-              path = [
-                pkgs.util-linux # nvidia-powerd wants lscpu
-              ];
-              wantedBy = ["multi-user.target"];
-              serviceConfig = {
-                Type = "dbus";
-                BusName = "nvidia.powerd.server";
-                ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
+            before = ["systemd-${state}.service"];
+            requiredBy = ["systemd-${state}.service"];
+          };
+        in
+          lib.mkMerge [
+            (lib.mkIf cfg.powerManagement.enable {
+              nvidia-suspend = nvidiaService "suspend";
+              nvidia-hibernate = nvidiaService "hibernate";
+              nvidia-resume =
+                (nvidiaService "resume")
+                // {
+                  before = [];
+                  after = ["systemd-suspend.service" "systemd-hibernate.service"];
+                  requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
+                };
+            })
+            (lib.mkIf cfg.nvidiaPersistenced {
+              "nvidia-persistenced" = {
+                description = "NVIDIA Persistence Daemon";
+                wantedBy = ["multi-user.target"];
+                serviceConfig = {
+                  Type = "forking";
+                  Restart = "always";
+                  PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
+                  ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
+                  ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+                };
               };
-            };
-          })
-        ];
-
-      services.acpid.enable = true;
-
-      services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
-
-      hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
-
-      systemd.tmpfiles.rules =
-        lib.optional config.virtualisation.docker.enableNvidia
-        "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
-        ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
-        "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
-
-      boot = {
-        blacklistedKernelModules = ["nouveau" "nvidiafb"];
-
-        extraModulePackages =
-          if cfg.open
-          then [nvidia_x11.open]
-          else [nvidia_x11.bin];
-
-        # nvidia-uvm is required by CUDA applications.
-        kernelModules =
-          ["nvidia-uvm"]
-          ++ lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];
-
-        # If requested enable modesetting via kernel parameter.
-        kernelParams =
-          lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
-          ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
-          ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
-          ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
-
-        # enable finegrained power management
-        extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
-          options nvidia "NVreg_DynamicPowerManagement=0x02"
-        '';
-      };
-
-      services.udev.extraRules =
-        ''
-          # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
-          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 255'"
-          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \  -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) $${i}; done'"
-          KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 254'"
-          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
-          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 1'"
-        ''
-        + lib.optionalString cfg.powerManagement.finegrained (
+            })
+            (lib.mkIf cfg.dynamicBoost.enable {
+              "nvidia-powerd" = {
+                description = "nvidia-powerd service";
+                path = [
+                  pkgs.util-linux # nvidia-powerd wants lscpu
+                ];
+                wantedBy = ["multi-user.target"];
+                serviceConfig = {
+                  Type = "dbus";
+                  BusName = "nvidia.powerd.server";
+                  ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
+                };
+              };
+            })
+          ];
+        services.acpid.enable = true;
+
+        services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
+
+        hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
+
+        systemd.tmpfiles.rules =
+          lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+          "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
+
+        boot = {
+          extraModulePackages =
+            if cfg.open
+            then [nvidia_x11.open]
+            else [nvidia_x11.bin];
+          # nvidia-uvm is required by CUDA applications.
+          kernelModules =
+            lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];
+
+          # If requested enable modesetting via kernel parameter.
+          kernelParams =
+            lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
+            ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
+            ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
+            ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
+
+          # enable finegrained power management
+          extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
+            options nvidia "NVreg_DynamicPowerManagement=0x02"
+          '';
+        };
+        services.udev.extraRules =
+          lib.optionalString cfg.powerManagement.finegrained (
           lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
             # Remove NVIDIA USB xHCI Host Controller devices, if present
             ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
@@ -489,5 +569,30 @@ in {
             ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
           ''
         );
-    };
+      })
+      # Data Center
+      (lib.mkIf (cfg.datacenter.enable) {
+        boot.extraModulePackages = [
+          nvidia_x11.bin
+        ];
+        systemd.services.nvidia-fabricmanager = {
+          enable = true;
+          description = "Start NVIDIA NVLink Management";
+          wantedBy = [ "multi-user.target" ];
+          unitConfig.After = [ "network-online.target" ];
+          unitConfig.Requires = [ "network-online.target" ];
+          serviceConfig = {
+            Type = "forking";
+            TimeoutStartSec = 240;
+            ExecStart = let
+              nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
+              in
+                nvidia_x11.fabricmanager + "/bin/nv-fabricmanager -c " + nv-fab-conf;
+            LimitCORE="infinity";
+          };
+        };
+        environment.systemPackages =
+          lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager;
+      })
+    ]);
 }
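
A minimal sketch of enabling the new Data Center driver path; as the assertion above enforces, it cannot be combined with the X11 "nvidia" driver:

    {
      # Do not also list "nvidia" in services.xserver.videoDrivers on such a host;
      # cfg.package then defaults to nvidiaPackages.dc and fabricmanager is started.
      hardware.nvidia.datacenter.enable = true;
    }
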
diff --git a/nixos/modules/i18n/input-method/uim.nix b/nixos/modules/i18n/input-method/uim.nix
index 9491ab2640fc..7225783b2a6f 100644
--- a/nixos/modules/i18n/input-method/uim.nix
+++ b/nixos/modules/i18n/input-method/uim.nix
@@ -10,7 +10,7 @@ in
 
     i18n.inputMethod.uim = {
       toolbar = mkOption {
-        type    = types.enum [ "gtk" "gtk3" "gtk-systray" "gtk3-systray" "qt4" ];
+        type    = types.enum [ "gtk" "gtk3" "gtk-systray" "gtk3-systray" "qt5" ];
         default = "gtk";
         example = "gtk-systray";
         description = lib.mdDoc ''
diff --git a/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix b/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix
index ea8056ff870c..573b31b439c2 100644
--- a/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix
+++ b/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix
@@ -6,7 +6,6 @@
   imports = [ ./installation-cd-graphical-base.nix ];
 
   isoImage.edition = "gnome";
-  isoImage.graphicalGrub = true;
 
   services.xserver.desktopManager.gnome = {
     # Add Firefox and other tools useful for installation to the launcher
diff --git a/nixos/modules/installer/cd-dvd/iso-image.nix b/nixos/modules/installer/cd-dvd/iso-image.nix
index c430048d6598..0b5135c088ea 100644
--- a/nixos/modules/installer/cd-dvd/iso-image.nix
+++ b/nixos/modules/installer/cd-dvd/iso-image.nix
@@ -24,6 +24,9 @@ let
         # Name appended to menuentry defaults to params if no specific name given.
         option.name or (optionalString (option ? params) "(${option.params})")
         }' ${optionalString (option ? class) " --class ${option.class}"} {
+          # Fall back to the UEFI console for boot; efifb sometimes has difficulties.
+          terminal_output console
+
           linux ${defaults.image} \''${isoboot} ${defaults.params} ${
             option.params or ""
           }
@@ -185,33 +188,25 @@ let
       # So instead we'll list a lot of possibly valid modes :/
       #"3840x2160"
       #"2560x1440"
+      "1920x1200"
       "1920x1080"
       "1366x768"
+      "1280x800"
       "1280x720"
+      "1200x1920"
       "1024x768"
+      "800x1280"
       "800x600"
       "auto"
     ]}
 
-    # Fonts can be loaded?
-    # (This font is assumed to always be provided as a fallback by NixOS)
-    if loadfont (\$root)/EFI/boot/unicode.pf2; then
-      set with_fonts=true
-    fi
-    if [ "\$textmode" != "true" -a "\$with_fonts" == "true" ]; then
-      # Use graphical term, it can be either with background image or a theme.
-      # input is "console", while output is "gfxterm".
-      # This enables "serial" input and output only when possible.
-      # Otherwise the failure mode is to not even enable gfxterm.
-      if test "\$with_serial" == "yes"; then
-        terminal_output gfxterm serial
-        terminal_input  console serial
-      else
-        terminal_output gfxterm
-        terminal_input  console
-      fi
+    if [ "\$textmode" == "false" ]; then
+      terminal_output gfxterm
+      terminal_input  console
     else
-      # Sets colors for the non-graphical term.
+      terminal_output console
+      terminal_input  console
+      # Sets colors for console term.
       set menu_color_normal=cyan/blue
       set menu_color_highlight=white/blue
     fi
@@ -250,18 +245,58 @@ let
     touch $out/EFI/nixos-installer-image
 
     # ALWAYS required modules.
-    MODULES="fat iso9660 part_gpt part_msdos \
-             normal boot linux configfile loopback chain halt \
-             efifwsetup efi_gop \
-             ls search search_label search_fs_uuid search_fs_file \
-             gfxmenu gfxterm gfxterm_background gfxterm_menu test all_video loadenv \
-             exfat ext2 ntfs btrfs hfsplus udf \
-             videoinfo png \
-             echo serial \
-            "
+    MODULES=(
+      # Basic modules for filesystems and partition schemes
+      "fat"
+      "iso9660"
+      "part_gpt"
+      "part_msdos"
+
+      # Basic stuff
+      "normal"
+      "boot"
+      "linux"
+      "configfile"
+      "loopback"
+      "chain"
+      "halt"
+
+      # Allows rebooting into firmware setup interface
+      "efifwsetup"
+
+      # EFI Graphics Output Protocol
+      "efi_gop"
+
+      # User commands
+      "ls"
+
+      # System commands
+      "search"
+      "search_label"
+      "search_fs_uuid"
+      "search_fs_file"
+      "echo"
+
+      # We're not using it anymore, but we'll leave it in so it can be used
+      # by the user, with the console using "C"
+      "serial"
+
+      # Graphical mode stuff
+      "gfxmenu"
+      "gfxterm"
+      "gfxterm_background"
+      "gfxterm_menu"
+      "test"
+      "loadenv"
+      "all_video"
+      "videoinfo"
+
+      # File types for graphical mode
+      "png"
+    )
 
     echo "Building GRUB with modules:"
-    for mod in $MODULES; do
+    for mod in ''${MODULES[@]}; do
       echo " - $mod"
     done
 
@@ -270,31 +305,27 @@ let
     for mod in efi_uga; do
       if [ -f ${grubPkgs.grub2_efi}/lib/grub/${grubPkgs.grub2_efi.grubTarget}/$mod.mod ]; then
         echo " - $mod"
-        MODULES+=" $mod"
+        MODULES+=("$mod")
       fi
     done
 
     # Make our own efi program, we can't rely on "grub-install" since it seems to
     # probe for devices, even with --skip-fs-probe.
-    grub-mkimage --directory=${grubPkgs.grub2_efi}/lib/grub/${grubPkgs.grub2_efi.grubTarget} -o $out/EFI/boot/boot${targetArch}.efi -p /EFI/boot -O ${grubPkgs.grub2_efi.grubTarget} \
-      $MODULES
+    grub-mkimage \
+      --directory=${grubPkgs.grub2_efi}/lib/grub/${grubPkgs.grub2_efi.grubTarget} \
+      -o $out/EFI/boot/boot${targetArch}.efi \
+      -p /EFI/boot \
+      -O ${grubPkgs.grub2_efi.grubTarget} \
+      ''${MODULES[@]}
     cp ${grubPkgs.grub2_efi}/share/grub/unicode.pf2 $out/EFI/boot/
 
     cat <<EOF > $out/EFI/boot/grub.cfg
 
-    set with_fonts=false
-    set textmode=${boolToString (!config.isoImage.graphicalGrub)}
-    # If you want to use serial for "terminal_*" commands, you need to set one up:
-    #   Example manual configuration:
-    #    → serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
-    # This uses the defaults, and makes the serial terminal available.
-    set with_serial=no
-    if serial; then set with_serial=yes ;fi
-    export with_serial
-    clear
+    set textmode=${boolToString (config.isoImage.forceTextMode)}
     set timeout=${toString grubEfiTimeout}
 
-    # This message will only be viewable when "gfxterm" is not used.
+    clear
+    # This message will only be viewable on the default (UEFI) console.
     echo ""
     echo "Loading graphical boot menu..."
     echo ""
@@ -306,7 +337,7 @@ let
     hiddenentry 'Text mode' --hotkey 't' {
       loadfont (\$root)/EFI/boot/unicode.pf2
       set textmode=true
-      terminal_output gfxterm console
+      terminal_output console
     }
     hiddenentry 'GUI mode' --hotkey 'g' {
       $(find ${config.isoImage.grubTheme} -iname '*.pf2' -printf "loadfont (\$root)/EFI/boot/grub-theme/%P\n")
@@ -400,6 +431,8 @@ let
     }
     EOF
 
+    grub-script-check $out/EFI/boot/grub.cfg
+
     ${refind}
   '';
 
@@ -658,13 +691,17 @@ in
       '';
     };
 
-    isoImage.graphicalGrub = mkOption {
+    isoImage.forceTextMode = mkOption {
       default = false;
       type = types.bool;
       example = true;
       description = lib.mdDoc ''
-        Whether to use textmode or graphical grub.
-        false means we use textmode grub.
+        Whether to use text mode instead of graphical GRUB.
+        A value of `true` means graphical mode will not be attempted.
+
+        This is useful for ruling out graphics mode as the root cause of a problem with the ISO image.
+
+        If text mode is only needed occasionally (e.g. for serial use), you can press the `T` key, after being prompted, to use text mode for the current boot.
       '';
     };
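
A minimal sketch of forcing the installer ISO's GRUB menu into text mode, e.g. to rule out graphics-mode issues or for serial console use:

    {
      isoImage.forceTextMode = true;
    }
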
 
diff --git a/nixos/modules/installer/tools/tools.nix b/nixos/modules/installer/tools/tools.nix
index 6564b583464a..78bcbbe2db5a 100644
--- a/nixos/modules/installer/tools/tools.nix
+++ b/nixos/modules/installer/tools/tools.nix
@@ -134,8 +134,8 @@ in
 
     system.nixos-generate-config.configuration = mkDefault ''
       # Edit this configuration file to define what should be installed on
-      # your system.  Help is available in the configuration.nix(5) man page
-      # and in the NixOS manual (accessible by running `nixos-help`).
+      # your system. Help is available in the configuration.nix(5) man page, on
+      # https://search.nixos.org/options and in the NixOS manual (`nixos-help`).
 
       { config, lib, pkgs, ... }:
 
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 3812aa14760d..206d5eaf75de 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -55,14 +55,17 @@
   ./hardware/cpu/amd-sev.nix
   ./hardware/cpu/intel-microcode.nix
   ./hardware/cpu/intel-sgx.nix
+  ./hardware/decklink.nix
   ./hardware/device-tree.nix
   ./hardware/digitalbitbox.nix
   ./hardware/flipperzero.nix
   ./hardware/flirc.nix
   ./hardware/gkraken.nix
+  ./hardware/glasgow.nix
   ./hardware/gpgsmartcards.nix
   ./hardware/hackrf.nix
   ./hardware/i2c.nix
+  ./hardware/infiniband.nix
   ./hardware/keyboard/qmk.nix
   ./hardware/keyboard/teck.nix
   ./hardware/keyboard/uhk.nix
@@ -163,6 +166,7 @@
   ./programs/direnv.nix
   ./programs/dmrconfig.nix
   ./programs/droidcam.nix
+  ./programs/ecryptfs.nix
   ./programs/environment.nix
   ./programs/evince.nix
   ./programs/extra-container.nix
@@ -276,6 +280,8 @@
   ./programs/xss-lock.nix
   ./programs/xwayland.nix
   ./programs/yabar.nix
+  ./programs/yazi.nix
+  ./programs/yubikey-touch-detector.nix
   ./programs/zmap.nix
   ./programs/zsh/oh-my-zsh.nix
   ./programs/zsh/zsh-autoenv.nix
@@ -305,6 +311,7 @@
   ./security/rngd.nix
   ./security/rtkit.nix
   ./security/sudo.nix
+  ./security/sudo-rs.nix
   ./security/systemd-confinement.nix
   ./security/tpm2.nix
   ./security/wrappers/default.nix
@@ -393,7 +400,6 @@
   ./services/continuous-integration/gitlab-runner.nix
   ./services/continuous-integration/gocd-agent/default.nix
   ./services/continuous-integration/gocd-server/default.nix
-  ./services/continuous-integration/hail.nix
   ./services/continuous-integration/hercules-ci-agent/default.nix
   ./services/continuous-integration/hydra/default.nix
   ./services/continuous-integration/jenkins/default.nix
@@ -493,6 +499,7 @@
   ./services/games/quake3-server.nix
   ./services/games/teeworlds.nix
   ./services/games/terraria.nix
+  ./services/games/xonotic.nix
   ./services/hardware/acpid.nix
   ./services/hardware/actkbd.nix
   ./services/hardware/argonone.nix
@@ -594,6 +601,7 @@
   ./services/mail/rss2email.nix
   ./services/mail/schleuder.nix
   ./services/mail/spamassassin.nix
+  ./services/mail/stalwart-mail.nix
   ./services/mail/sympa.nix
   ./services/mail/zeyple.nix
   ./services/matrix/appservice-discord.nix
@@ -643,6 +651,7 @@
   ./services/misc/etesync-dav.nix
   ./services/misc/evdevremapkeys.nix
   ./services/misc/felix.nix
+  ./services/misc/forgejo.nix
   ./services/misc/freeswitch.nix
   ./services/misc/fstrim.nix
   ./services/misc/gammu-smsd.nix
@@ -893,6 +902,7 @@
   ./services/networking/flannel.nix
   ./services/networking/freenet.nix
   ./services/networking/freeradius.nix
+  ./services/networking/frp.nix
   ./services/networking/frr.nix
   ./services/networking/gateone.nix
   ./services/networking/gdomap.nix
@@ -981,6 +991,7 @@
   ./services/networking/nix-serve.nix
   ./services/networking/nix-store-gcs-proxy.nix
   ./services/networking/nixops-dns.nix
+  ./services/networking/nncp.nix
   ./services/networking/nntp-proxy.nix
   ./services/networking/nomad.nix
   ./services/networking/nsd.nix
@@ -1062,7 +1073,6 @@
   ./services/networking/tayga.nix
   ./services/networking/tcpcrypt.nix
   ./services/networking/teamspeak3.nix
-  ./services/networking/tedicross.nix
   ./services/networking/teleport.nix
   ./services/networking/tetrd.nix
   ./services/networking/tftpd.nix
@@ -1227,6 +1237,7 @@
   ./services/web-apps/healthchecks.nix
   ./services/web-apps/hedgedoc.nix
   ./services/web-apps/hledger-web.nix
+  ./services/web-apps/honk.nix
   ./services/web-apps/icingaweb2/icingaweb2.nix
   ./services/web-apps/icingaweb2/module-monitoring.nix
   ./services/web-apps/invidious.nix
@@ -1245,6 +1256,7 @@
   ./services/web-apps/matomo.nix
   ./services/web-apps/mattermost.nix
   ./services/web-apps/mediawiki.nix
+  ./services/web-apps/meme-bingo-web.nix
   ./services/web-apps/miniflux.nix
   ./services/web-apps/monica.nix
   ./services/web-apps/moodle.nix
@@ -1256,6 +1268,7 @@
   ./services/web-apps/node-red.nix
   ./services/web-apps/onlyoffice.nix
   ./services/web-apps/openvscode-server.nix
+  ./services/web-apps/mobilizon.nix
   ./services/web-apps/openwebrx.nix
   ./services/web-apps/outline.nix
   ./services/web-apps/peering-manager.nix
@@ -1473,6 +1486,7 @@
   ./virtualisation/nixos-containers.nix
   ./virtualisation/oci-containers.nix
   ./virtualisation/openstack-options.nix
+  ./virtualisation/oci-options.nix
   ./virtualisation/openvswitch.nix
   ./virtualisation/parallels-guest.nix
   ./virtualisation/podman/default.nix
diff --git a/nixos/modules/programs/bash/bash.nix b/nixos/modules/programs/bash/bash.nix
index 286faeadc489..7d3322ea5e50 100644
--- a/nixos/modules/programs/bash/bash.nix
+++ b/nixos/modules/programs/bash/bash.nix
@@ -81,7 +81,7 @@ in
           if [ "$TERM" != "dumb" ] || [ -n "$INSIDE_EMACS" ]; then
             PROMPT_COLOR="1;31m"
             ((UID)) && PROMPT_COLOR="1;32m"
-            if [ -n "$INSIDE_EMACS" ] || [ "$TERM" = "eterm" ] || [ "$TERM" = "eterm-color" ]; then
+            if [ -n "$INSIDE_EMACS" ]; then
               # Emacs term mode doesn't support xterm title escape sequence (\e]0;)
               PS1="\n\[\033[$PROMPT_COLOR\][\u@\h:\w]\\$\[\033[0m\] "
             else
diff --git a/nixos/modules/programs/clash-verge.nix b/nixos/modules/programs/clash-verge.nix
index 29977be3858f..57a1c0377edb 100644
--- a/nixos/modules/programs/clash-verge.nix
+++ b/nixos/modules/programs/clash-verge.nix
@@ -2,17 +2,9 @@
 
 {
   options.programs.clash-verge = {
-    enable = lib.mkEnableOption (lib.mdDoc ''
-      Clash Verge.
-    '');
-
-    autoStart = lib.mkEnableOption (lib.mdDoc ''
-      Clash Verge Auto Launch.
-    '');
-
-    tunMode = lib.mkEnableOption (lib.mdDoc ''
-      Clash Verge Tun Mode.
-    '');
+    enable = lib.mkEnableOption (lib.mdDoc "Clash Verge");
+    autoStart = lib.mkEnableOption (lib.mdDoc "Clash Verge auto launch");
+    tunMode = lib.mkEnableOption (lib.mdDoc "Clash Verge TUN mode");
   };
 
   config =
diff --git a/nixos/modules/programs/dconf.nix b/nixos/modules/programs/dconf.nix
index 7261a143528f..cf53658c4fad 100644
--- a/nixos/modules/programs/dconf.nix
+++ b/nixos/modules/programs/dconf.nix
@@ -1,55 +1,217 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.programs.dconf;
-  cfgDir = pkgs.symlinkJoin {
-    name = "dconf-system-config";
-    paths = map (x: "${x}/etc/dconf") cfg.packages;
-    postBuild = ''
-      mkdir -p $out/profile
-      mkdir -p $out/db
-    '' + (
-      concatStringsSep "\n" (
-        mapAttrsToList (
-          name: path: ''
-            ln -s ${path} $out/profile/${name}
-          ''
-        ) cfg.profiles
-      )
-    ) + ''
-      ${pkgs.dconf}/bin/dconf update $out/db
-    '';
+
+  # Compile keyfiles to dconf DB
+  compileDconfDb = dir: pkgs.runCommand "dconf-db"
+    {
+      nativeBuildInputs = [ (lib.getBin pkgs.dconf) ];
+    } "dconf compile $out ${dir}";
+
+  # Check if dconf keyfiles are valid
+  checkDconfKeyfiles = dir: pkgs.runCommand "check-dconf-keyfiles"
+    {
+      nativeBuildInputs = [ (lib.getBin pkgs.dconf) ];
+    } ''
+    if [[ -f ${dir} ]]; then
+      echo "dconf keyfiles should be a directory but a file is provided: ${dir}"
+      exit 1
+    fi
+
+    dconf compile db ${dir} || (
+      echo "The dconf keyfiles are invalid: ${dir}"
+      exit 1
+    )
+    cp -R ${dir} $out
+  '';
+
+  mkAllLocks = settings: lib.flatten (
+    lib.mapAttrsToList (k: v: lib.mapAttrsToList (k': _: "/${k}/${k'}") v) settings);
+
+  # Generate dconf DB from dconfDatabase and keyfiles
+  mkDconfDb = val: compileDconfDb (pkgs.symlinkJoin {
+    name = "nixos-generated-dconf-keyfiles";
+    paths = [
+      (pkgs.writeTextDir "nixos-generated-dconf-keyfiles" (lib.generators.toDconfINI val.settings))
+      (pkgs.writeTextDir "locks/nixos-generated-dconf-locks" (lib.concatStringsSep "\n"
+        (if val.lockAll then mkAllLocks val.settings else val.locks)
+      ))
+    ] ++ (map checkDconfKeyfiles val.keyfiles);
+  });
+
+  # Check if a dconf DB file is valid. The dconf cli doesn't return 1 when it can't
+  # open the database file so we have to check if the output is empty.
+  checkDconfDb = file: pkgs.runCommand "check-dconf-db"
+    {
+      nativeBuildInputs = [ (lib.getBin pkgs.dconf) ];
+    } ''
+    if [[ -d ${file} ]]; then
+      echo "dconf DB should be a file but a directory is provided: ${file}"
+      exit 1
+    fi
+
+    echo "file-db:${file}" > profile
+    DCONF_PROFILE=$(pwd)/profile dconf dump / > output 2> error
+    if [[ ! -s output ]] && [[ -s error ]]; then
+      cat error
+      echo "The dconf DB file is invalid: ${file}"
+      exit 1
+    fi
+
+    cp ${file} $out
+  '';
+
+  # Generate dconf profile
+  mkDconfProfile = name: value:
+    if lib.isDerivation value || lib.isPath value then
+      pkgs.runCommand "dconf-profile" { } ''
+        if [[ -d ${value} ]]; then
+          echo "Dconf profile should be a file but a directory is provided."
+          exit 1
+        fi
+        mkdir -p $out/etc/dconf/profile/
+        cp ${value} $out/etc/dconf/profile/${name}
+      ''
+    else
+      pkgs.writeTextDir "etc/dconf/profile/${name}" (
+        lib.concatMapStrings (x: "${x}\n") ((
+          lib.optional value.enableUserDb "user-db:user"
+        ) ++ (
+          map
+            (value:
+              let
+                db = if lib.isAttrs value && !lib.isDerivation value then mkDconfDb value else checkDconfDb value;
+              in
+              "file-db:${db}")
+            value.databases
+        ))
+      );
+
+  dconfDatabase = with lib.types; submodule {
+    options = {
+      keyfiles = lib.mkOption {
+        type = listOf (oneOf [ path package ]);
+        default = [ ];
+        description = lib.mdDoc "A list of dconf keyfile directories.";
+      };
+      settings = lib.mkOption {
+        type = attrs;
+        default = { };
+        description = lib.mdDoc "An attrset used to generate dconf keyfile.";
+        example = lib.literalExpression ''
+          with lib.gvariant;
+          {
+            "com/raggesilver/BlackBox" = {
+              scrollback-lines = mkUint32 10000;
+              theme-dark = "Tomorrow Night";
+            };
+          }
+        '';
+      };
+      locks = lib.mkOption {
+        type = with lib.types; listOf str;
+        default = [ ];
+        description = lib.mdDoc ''
+          A list of dconf keys to be locked down. This has no effect if `lockAll`
+          is set.
+        '';
+        example = lib.literalExpression ''
+          [ "/org/gnome/desktop/background/picture-uri" ]
+        '';
+      };
+      lockAll = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc "Lockdown all dconf keys in `settings`.";
+      };
+    };
+  };
+
+  dconfProfile = with lib.types; submodule {
+    options = {
+      enableUserDb = lib.mkOption {
+        type = bool;
+        default = true;
+        description = lib.mdDoc "Add `user-db:user` at the beginning of the profile.";
+      };
+
+      databases = lib.mkOption {
+        type = with lib.types; listOf (oneOf [
+          path
+          package
+          dconfDatabase
+        ]);
+        default = [ ];
+        description = lib.mdDoc ''
+          List of data sources for the profile. An element can be an attrset,
+          or the path of an already compiled database. Each element is converted
+          to a file-db.
+
+          Databases are searched from top to bottom and the first match takes
+          priority. If a lock for a particular key is installed, the value from
+          the last database in the profile that locks the key is used instead.
+          This can be used to enforce mandatory settings.
+        '';
+      };
+    };
   };
+
 in
 {
-  ###### interface
-
   options = {
     programs.dconf = {
-      enable = mkEnableOption (lib.mdDoc "dconf");
+      enable = lib.mkEnableOption (lib.mdDoc "dconf");
 
-      profiles = mkOption {
-        type = types.attrsOf types.path;
-        default = {};
-        description = lib.mdDoc "Set of dconf profile files, installed at {file}`/etc/dconf/profiles/«name»`.";
-        internal = true;
+      profiles = lib.mkOption {
+        type = with lib.types; attrsOf (oneOf [
+          path
+          package
+          dconfProfile
+        ]);
+        default = { };
+        description = lib.mdDoc ''
+          Attrset of dconf profiles. By default the `user` profile is used which
+          ends up in `/etc/dconf/profile/user`.
+        '';
+        example = lib.literalExpression ''
+          {
+            # A "user" profile with a database
+            user.databases = [
+              {
+                settings = { };
+              }
+            ];
+            # A "bar" profile from a package
+            bar = pkgs.bar-dconf-profile;
+            # A "foo" profile from a path
+            foo = ''${./foo};
+          }
+        '';
       };
 
-      packages = mkOption {
-        type = types.listOf types.package;
-        default = [];
+      packages = lib.mkOption {
+        type = lib.types.listOf lib.types.package;
+        default = [ ];
         description = lib.mdDoc "A list of packages which provide dconf profiles and databases in {file}`/etc/dconf`.";
       };
     };
   };
 
-  ###### implementation
+  config = lib.mkIf (cfg.profiles != { } || cfg.enable) {
+    programs.dconf.packages = lib.mapAttrsToList mkDconfProfile cfg.profiles;
 
-  config = mkIf (cfg.profiles != {} || cfg.enable) {
-    environment.etc.dconf = mkIf (cfg.profiles != {} || cfg.packages != []) {
-      source = cfgDir;
+    environment.etc.dconf = lib.mkIf (cfg.packages != [ ]) {
+      source = pkgs.symlinkJoin {
+        name = "dconf-system-config";
+        paths = map (x: "${x}/etc/dconf") cfg.packages;
+        nativeBuildInputs = [ (lib.getBin pkgs.dconf) ];
+        postBuild = ''
+          if test -d $out/db; then
+            dconf update $out/db
+          fi
+        '';
+      };
     };
 
     services.dbus.packages = [ pkgs.dconf ];
@@ -59,8 +221,9 @@ in
     # For dconf executable
     environment.systemPackages = [ pkgs.dconf ];
 
-    # Needed for unwrapped applications
-    environment.sessionVariables.GIO_EXTRA_MODULES = mkIf cfg.enable [ "${pkgs.dconf.lib}/lib/gio/modules" ];
+    environment.sessionVariables = lib.mkIf cfg.enable {
+      # Needed for unwrapped applications
+      GIO_EXTRA_MODULES = [ "${pkgs.dconf.lib}/lib/gio/modules" ];
+    };
   };
-
 }
diff --git a/nixos/modules/programs/direnv.nix b/nixos/modules/programs/direnv.nix
index 53717fae11a0..1a80cb202806 100644
--- a/nixos/modules/programs/direnv.nix
+++ b/nixos/modules/programs/direnv.nix
@@ -32,15 +32,6 @@ in {
       the hiding of direnv logging
     '');
 
-    persistDerivations =
-      (lib.mkEnableOption (lib.mdDoc ''
-        setting keep-derivations and keep-outputs to true
-        to prevent shells from getting garbage collected
-      ''))
-      // {
-        default = true;
-      };
-
     loadInNixShell =
       lib.mkEnableOption (lib.mdDoc ''
         loading direnv in `nix-shell` `nix shell` or `nix develop`
@@ -62,6 +53,10 @@ in {
     };
   };
 
+  imports = [
    (lib.mkRemovedOptionModule ["programs" "direnv" "persistDerivations"] "persistDerivations was removed as it is no longer necessary")
+  ];
+
   config = lib.mkIf cfg.enable {
 
     programs = {
@@ -87,11 +82,6 @@ in {
       '';
     };
 
-    nix.settings = lib.mkIf cfg.persistDerivations {
-      keep-outputs = true;
-      keep-derivations = true;
-    };
-
     environment = {
       systemPackages =
         if cfg.loadInNixShell then [cfg.package]
diff --git a/nixos/modules/programs/ecryptfs.nix b/nixos/modules/programs/ecryptfs.nix
new file mode 100644
index 000000000000..63c1a3ad4419
--- /dev/null
+++ b/nixos/modules/programs/ecryptfs.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.ecryptfs;
+
+in {
+  options.programs.ecryptfs = {
+    enable = mkEnableOption (lib.mdDoc "ecryptfs setuid mount wrappers");
+  };
+
+  config = mkIf cfg.enable {
+    security.wrappers = {
+
+      "mount.ecryptfs_private" = {
+        setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${lib.getBin pkgs.ecryptfs}/bin/mount.ecryptfs_private";
+      };
+      "umount.ecryptfs_private" = {
+        setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${lib.getBin pkgs.ecryptfs}/bin/umount.ecryptfs_private";
+      };
+
+    };
+  };
+}
diff --git a/nixos/modules/programs/environment.nix b/nixos/modules/programs/environment.nix
index 3fbda153e0b4..324b19184747 100644
--- a/nixos/modules/programs/environment.nix
+++ b/nixos/modules/programs/environment.nix
@@ -37,13 +37,10 @@ in
     environment.profileRelativeSessionVariables =
       { PATH = [ "/bin" ];
         INFOPATH = [ "/info" "/share/info" ];
-        KDEDIRS = [ "" ];
-        QT_PLUGIN_PATH = [ "/lib/qt4/plugins" "/lib/kde4/plugins" ];
         QTWEBKIT_PLUGIN_PATH = [ "/lib/mozilla/plugins/" ];
         GTK_PATH = [ "/lib/gtk-2.0" "/lib/gtk-3.0" "/lib/gtk-4.0" ];
         XDG_CONFIG_DIRS = [ "/etc/xdg" ];
         XDG_DATA_DIRS = [ "/share" ];
-        MOZ_PLUGIN_PATH = [ "/lib/mozilla/plugins" ];
         LIBEXEC_PATH = [ "/lib/libexec" ];
       };
 
diff --git a/nixos/modules/programs/fish.nix b/nixos/modules/programs/fish.nix
index c85097f45e92..b500b8f24b2c 100644
--- a/nixos/modules/programs/fish.nix
+++ b/nixos/modules/programs/fish.nix
@@ -258,16 +258,13 @@ in
             preferLocalBuild = true;
             allowSubstitutes = false;
           };
-          generateCompletions = package: pkgs.runCommand
-            "${package.name}_fish-completions"
-            (
-              {
-                inherit package;
-                preferLocalBuild = true;
-                allowSubstitutes = false;
-              }
-              // optionalAttrs (package ? meta.priority) { meta.priority = package.meta.priority; }
-            )
+          generateCompletions = package: pkgs.runCommandLocal
+            ( with lib.strings; let
+                storeLength = stringLength storeDir + 34; # Nix' StorePath::HashLen + 2 for the separating slash and dash
+                pathName = substring storeLength (stringLength package - storeLength) package;
+              in (package.name or pathName) + "_fish-completions")
+            ( { inherit package; } //
+              optionalAttrs (package ? meta.priority) { meta.priority = package.meta.priority; })
             ''
               mkdir -p $out
               if [ -d $package/share/man ]; then
diff --git a/nixos/modules/programs/hyprland.nix b/nixos/modules/programs/hyprland.nix
index e0ee5b6bd2a4..638dfb98e8ab 100644
--- a/nixos/modules/programs/hyprland.nix
+++ b/nixos/modules/programs/hyprland.nix
@@ -7,9 +7,7 @@ with lib; let
   cfg = config.programs.hyprland;
 
   finalPortalPackage = cfg.portalPackage.override {
-    hyprland-share-picker = pkgs.hyprland-share-picker.override {
-      hyprland = cfg.finalPackage;
-    };
+    hyprland = cfg.finalPackage;
   };
 in
 {
diff --git a/nixos/modules/programs/streamdeck-ui.nix b/nixos/modules/programs/streamdeck-ui.nix
index 4c055029e39b..220f0a35f162 100644
--- a/nixos/modules/programs/streamdeck-ui.nix
+++ b/nixos/modules/programs/streamdeck-ui.nix
@@ -24,7 +24,7 @@ in
   config = mkIf cfg.enable {
     environment.systemPackages = with pkgs; [
       cfg.package
-      (mkIf cfg.autoStart (makeAutostartItem { name = "streamdeck-ui"; package = cfg.package; }))
+      (mkIf cfg.autoStart (makeAutostartItem { name = "streamdeck-ui-noui"; package = cfg.package; }))
     ];
 
     services.udev.packages = [ cfg.package ];
diff --git a/nixos/modules/programs/tmux.nix b/nixos/modules/programs/tmux.nix
index 4f452f1d7f9b..0d1c7c9cdf0f 100644
--- a/nixos/modules/programs/tmux.nix
+++ b/nixos/modules/programs/tmux.nix
@@ -52,6 +52,8 @@ let
     set  -s escape-time       ${toString cfg.escapeTime}
     set  -g history-limit     ${toString cfg.historyLimit}
 
+    ${cfg.extraConfigBeforePlugins}
+
     ${lib.optionalString (cfg.plugins != []) ''
     # Run plugins
     ${lib.concatMapStringsSep "\n" (x: "run-shell ${x.rtp}") cfg.plugins}
@@ -108,10 +110,18 @@ in {
         description = lib.mdDoc "Time in milliseconds for which tmux waits after an escape is input.";
       };
 
+      extraConfigBeforePlugins = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Additional contents of /etc/tmux.conf, to be run before sourcing plugins.
+        '';
+        type = types.lines;
+      };
+
       extraConfig = mkOption {
         default = "";
         description = lib.mdDoc ''
-          Additional contents of /etc/tmux.conf
+          Additional contents of /etc/tmux.conf, to be run after sourcing plugins.
         '';
         type = types.lines;
       };
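A short sketch of how the two tmux configuration hooks relate; the plugin choice and the settings shown are illustrative assumptions:

  { pkgs, ... }:
  {
    programs.tmux = {
      enable = true;
      plugins = [ pkgs.tmuxPlugins.resurrect ];
      # Emitted before the plugin run-shell lines, e.g. to set plugin options.
      extraConfigBeforePlugins = ''
        set -g @resurrect-strategy-nvim 'session'
      '';
      # Emitted after the plugins have been sourced.
      extraConfig = ''
        bind r source-file /etc/tmux.conf
      '';
    };
  }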
diff --git a/nixos/modules/programs/yazi.nix b/nixos/modules/programs/yazi.nix
new file mode 100644
index 000000000000..973f5c0122c2
--- /dev/null
+++ b/nixos/modules/programs/yazi.nix
@@ -0,0 +1,53 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.yazi;
+
+  settingsFormat = pkgs.formats.toml { };
+
+  names = [ "yazi" "theme" "keymap" ];
+in
+{
+  options.programs.yazi = {
+    enable = lib.mkEnableOption (lib.mdDoc "yazi terminal file manager");
+
+    package = lib.mkPackageOptionMD pkgs "yazi" { };
+
+    settings = lib.mkOption {
+      type = with lib.types; submodule {
+        options = lib.listToAttrs (map
+          (name: lib.nameValuePair name (lib.mkOption {
+            inherit (settingsFormat) type;
+            default = { };
+            description = lib.mdDoc ''
+              Configuration included in `${name}.toml`.
+
+              See https://github.com/sxyazi/yazi/blob/v${cfg.package.version}/config/docs/${name}.md for documentation.
+            '';
+          }))
+          names);
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Configuration included in `$YAZI_CONFIG_HOME`.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment = {
+      systemPackages = [ cfg.package ];
+      variables.YAZI_CONFIG_HOME = "/etc/yazi/";
+      etc = lib.attrsets.mergeAttrsList (map
+        (name: lib.optionalAttrs (cfg.settings.${name} != { }) {
+          "yazi/${name}.toml".source = settingsFormat.generate "${name}.toml" cfg.settings.${name};
+        })
+        names);
+    };
+  };
+  meta = {
+    maintainers = with lib.maintainers; [ linsui ];
+    # The version of the package is used in the doc.
+    buildDocsInSandbox = false;
+  };
+}
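A minimal usage sketch for the new module; the individual TOML keys are assumptions and should be checked against the upstream documentation linked in the option description:

  { ... }:
  {
    programs.yazi = {
      enable = true;
      settings = {
        # Written to /etc/yazi/yazi.toml
        yazi.manager.sort_by = "mtime";
        # Written to /etc/yazi/theme.toml
        theme.manager.cwd = { fg = "cyan"; };
      };
    };
  }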
diff --git a/nixos/modules/programs/yubikey-touch-detector.nix b/nixos/modules/programs/yubikey-touch-detector.nix
new file mode 100644
index 000000000000..9a0d107f73c9
--- /dev/null
+++ b/nixos/modules/programs/yubikey-touch-detector.nix
@@ -0,0 +1,21 @@
+{ config, lib, pkgs, ... }:
+let cfg = config.programs.yubikey-touch-detector;
+in {
+  options = {
+    programs.yubikey-touch-detector = {
+      enable = lib.mkEnableOption "yubikey-touch-detector";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.packages = [ pkgs.yubikey-touch-detector ];
+
+    systemd.user.services.yubikey-touch-detector = {
+      path = [ pkgs.gnupg ];
+      wantedBy = [ "graphical-session.target" ];
+    };
+    systemd.user.sockets.yubikey-touch-detector = {
+      wantedBy = [ "sockets.target" ];
+    };
+  };
+}
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index 45014ed3c68e..408c515044c8 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -124,6 +124,9 @@ in
       See https://www.isc.org/blogs/isc-dhcp-eol/ for details.
       Please switch to a different implementation like kea or dnsmasq.
     '')
+    (mkRemovedOptionModule [ "services" "tedicross" ] ''
+      The corresponding package was broken and removed from nixpkgs.
+    '')
 
     # Do NOT add any option renames here, see top of the file
   ];
diff --git a/nixos/modules/security/acme/default.md b/nixos/modules/security/acme/default.md
index 8ff97b55f685..31548ad181a7 100644
--- a/nixos/modules/security/acme/default.md
+++ b/nixos/modules/security/acme/default.md
@@ -189,7 +189,7 @@ security.acme.defaults.email = "admin+acme@example.com";
 security.acme.certs."example.com" = {
   domain = "*.example.com";
   dnsProvider = "rfc2136";
-  credentialsFile = "/var/lib/secrets/certs.secret";
+  environmentFile = "/var/lib/secrets/certs.secret";
   # We don't need to wait for propagation since this is a local DNS server
   dnsPropagationCheck = false;
 };
@@ -256,7 +256,7 @@ security.acme.acceptTerms = true;
 security.acme.defaults.email = "admin+acme@example.com";
 security.acme.defaults = {
   dnsProvider = "rfc2136";
-  credentialsFile = "/var/lib/secrets/certs.secret";
+  environmentFile = "/var/lib/secrets/certs.secret";
   # We don't need to wait for propagation since this is a local DNS server
   dnsPropagationCheck = false;
 };
diff --git a/nixos/modules/security/acme/default.nix b/nixos/modules/security/acme/default.nix
index 8aee71c864d0..92bed172f452 100644
--- a/nixos/modules/security/acme/default.nix
+++ b/nixos/modules/security/acme/default.nix
@@ -1,6 +1,8 @@
 { config, lib, pkgs, options, ... }:
 with lib;
 let
+
+
   cfg = config.security.acme;
   opt = options.security.acme;
   user = if cfg.useRoot then "root" else "acme";
@@ -14,6 +16,36 @@ let
   mkAccountHash = acmeServer: data: mkHash "${toString acmeServer} ${data.keyType} ${data.email}";
   accountDirRoot = "/var/lib/acme/.lego/accounts/";
 
+  lockdir = "/run/acme/";
+  concurrencyLockfiles = map (n: "${toString n}.lock") (lib.range 1 cfg.maxConcurrentRenewals);
+  # Assign elements of `baseList` to each element of `needAssignmentList`, until the latter is exhausted.
+  # returns: [{fst = "element of baseList"; snd = "element of needAssignmentList"}]
+  roundRobinAssign = baseList: needAssignmentList:
+    if baseList == [] then []
+    else _rrCycler baseList baseList needAssignmentList;
+  _rrCycler = with builtins; origBaseList: workingBaseList: needAssignmentList:
+    if (workingBaseList == [] || needAssignmentList == [])
+    then []
+    else
+      [{ fst = head workingBaseList; snd = head needAssignmentList;}] ++
+      _rrCycler origBaseList (if (tail workingBaseList == []) then origBaseList else tail workingBaseList) (tail needAssignmentList);
+  attrsToList = mapAttrsToList (attrname: attrval: {name = attrname; value = attrval;});
+  # for an AttrSet `funcsAttrs` having functions as values, apply single arguments from
+  # `argsList` to them in a round-robin manner.
+  # Returns an attribute set with the applied functions as values.
+  roundRobinApplyAttrs = funcsAttrs: argsList: lib.listToAttrs (map (x: {inherit (x.snd) name; value = x.snd.value x.fst;}) (roundRobinAssign argsList (attrsToList funcsAttrs)));
+  wrapInFlock = lockfilePath: script:
+    # explainer: https://stackoverflow.com/a/60896531
+    ''
+      exec {LOCKFD}> ${lockfilePath}
+      echo "Waiting to acquire lock ${lockfilePath}"
+      ${pkgs.flock}/bin/flock ''${LOCKFD} || exit 1
+      echo "Acquired lock ${lockfilePath}"
+    ''
+    + script + "\n"
+    + ''echo "Releasing lock ${lockfilePath}"  # only released after process exit'';
+
+
   # There are many services required to make cert renewals work.
   # They all follow a common structure:
   #   - They inherit this commonServiceConfig
@@ -31,6 +63,7 @@ let
     ProtectSystem = "strict";
     ReadWritePaths = [
       "/var/lib/acme"
+      lockdir
     ];
     PrivateTmp = true;
 
@@ -118,7 +151,8 @@ let
       # We don't want this to run every time a renewal happens
       RemainAfterExit = true;
 
-      # These StateDirectory entries negate the need for tmpfiles
+      # StateDirectory entries are a cleaner, service-level mechanism
+      # for dealing with persistent service data
       StateDirectory = [ "acme" "acme/.lego" "acme/.lego/accounts" ];
       StateDirectoryMode = 755;
       WorkingDirectory = "/var/lib/acme";
@@ -127,6 +161,25 @@ let
       ExecStart = "+" + (pkgs.writeShellScript "acme-fixperms" script);
     };
   };
+  lockfilePrepareService = {
+    description = "Manage lock files for acme services";
+
+    # ensure all required lock files exist, but none more
+    script = ''
+      GLOBIGNORE="${concatStringsSep ":" concurrencyLockfiles}"
+      rm -f *
+      unset GLOBIGNORE
+
+      xargs touch <<< "${toString concurrencyLockfiles}"
+    '';
+
+    serviceConfig = commonServiceConfig // {
+      # We don't want this to run every time a renewal happens
+      RemainAfterExit = true;
+      WorkingDirectory = lockdir;
+    };
+  };
+
 
   certToConfig = cert: data: let
     acmeServer = data.server;
@@ -229,10 +282,10 @@ let
       };
     };
 
-    selfsignService = {
+    selfsignService = lockfileName: {
       description = "Generate self-signed certificate for ${cert}";
-      after = [ "acme-selfsigned-ca.service" "acme-fixperms.service" ];
-      requires = [ "acme-selfsigned-ca.service" "acme-fixperms.service" ];
+      after = [ "acme-selfsigned-ca.service" "acme-fixperms.service" ] ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
+      requires = [ "acme-selfsigned-ca.service" "acme-fixperms.service" ] ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
 
       path = with pkgs; [ minica ];
 
@@ -256,7 +309,7 @@ let
       # Working directory will be /tmp
       # minica will output to a folder sharing the name of the first domain
       # in the list, which will be ${data.domain}
-      script = ''
+      script = (if (lockfileName == null) then lib.id else wrapInFlock "${lockdir}${lockfileName}") ''
         minica \
           --ca-key ca/key.pem \
           --ca-cert ca/cert.pem \
@@ -277,10 +330,10 @@ let
       '';
     };
 
-    renewService = {
+    renewService = lockfileName: {
       description = "Renew ACME certificate for ${cert}";
-      after = [ "network.target" "network-online.target" "acme-fixperms.service" "nss-lookup.target" ] ++ selfsignedDeps;
-      wants = [ "network-online.target" "acme-fixperms.service" ] ++ selfsignedDeps;
+      after = [ "network.target" "network-online.target" "acme-fixperms.service" "nss-lookup.target" ] ++ selfsignedDeps ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
+      wants = [ "network-online.target" "acme-fixperms.service" ] ++ selfsignedDeps ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
 
       # https://github.com/NixOS/nixpkgs/pull/81371#issuecomment-605526099
       wantedBy = optionals (!config.boot.isContainer) [ "multi-user.target" ];
@@ -309,8 +362,14 @@ let
           "/var/lib/acme/.lego/${cert}/${certDir}:/tmp/certificates"
         ];
 
-        # Only try loading the credentialsFile if the dns challenge is enabled
-        EnvironmentFile = mkIf useDns data.credentialsFile;
+        # Only try loading the environmentFile if the dns challenge is enabled
+        EnvironmentFile = mkIf useDns data.environmentFile;
+
+        Environment = mkIf useDns
+          (mapAttrsToList (k: v: ''"${k}=%d/${k}"'') data.credentialFiles);
+
+        LoadCredential = mkIf useDns
+          (mapAttrsToList (k: v: "${k}:${v}") data.credentialFiles);
 
         # Run as root (Prefixed with +)
         ExecStartPost = "+" + (pkgs.writeShellScript "acme-postrun" ''
@@ -329,7 +388,7 @@ let
       };
 
       # Working directory will be /tmp
-      script = ''
+      script = (if (lockfileName == null) then lib.id else wrapInFlock "${lockdir}${lockfileName}") ''
         ${optionalString data.enableDebugLogs "set -x"}
         set -euo pipefail
 
@@ -443,6 +502,10 @@ let
       defaultText = if isDefaults then default else literalExpression "config.security.acme.defaults.${name}";
     };
   in {
+    imports = [
+      (mkRenamedOptionModule [ "credentialsFile" ] [ "environmentFile" ])
+    ];
+
     options = {
       validMinDays = mkOption {
         type = types.int;
@@ -554,9 +617,9 @@ let
         '';
       };
 
-      credentialsFile = mkOption {
+      environmentFile = mkOption {
         type = types.nullOr types.path;
-        inherit (defaultAndText "credentialsFile" null) default defaultText;
+        inherit (defaultAndText "environmentFile" null) default defaultText;
         description = lib.mdDoc ''
           Path to an EnvironmentFile for the cert's service containing any required and
           optional environment variables for your selected dnsProvider.
@@ -566,6 +629,24 @@ let
         example = "/var/src/secrets/example.org-route53-api-token";
       };
 
+      credentialFiles = mkOption {
+        type = types.attrsOf (types.path);
+        inherit (defaultAndText "credentialFiles" {}) default defaultText;
+        description = lib.mdDoc ''
+          Environment variables suffixed by "_FILE" to set for the cert's service
+          for your selected dnsProvider.
+          To find out what values you need to set, consult the documentation at
+          <https://go-acme.github.io/lego/dns/> for the corresponding dnsProvider.
+          This allows credential files to be passed to lego securely by leveraging
+          systemd credentials.
+        '';
+        example = literalExpression ''
+          {
+            "RFC2136_TSIG_SECRET_FILE" = "/run/secrets/tsig-secret-example.org";
+          }
+        '';
+      };
+
       dnsPropagationCheck = mkOption {
         type = types.bool;
         inherit (defaultAndText "dnsPropagationCheck" true) default defaultText;
@@ -755,6 +836,17 @@ in {
           }
         '';
       };
+      maxConcurrentRenewals = mkOption {
+        default = 5;
+        type = types.int;
+        description = lib.mdDoc ''
+          Maximum number of concurrent certificate generation or renewal jobs. All other
+          jobs will queue and wait for running jobs to finish. Reduces the system load of
+          certificate generation.
+
+          Set to `0` to allow an unlimited number of concurrent job runs.
+        '';
+      };
     };
   };
 
@@ -865,6 +957,13 @@ in {
             `security.acme.certs.${cert}.listenHTTP` must be provided.
           '';
         }
+        {
+          assertion = all (hasSuffix "_FILE") (attrNames data.credentialFiles);
+          message = ''
+            Option `security.acme.certs.${cert}.credentialFiles` can only be
+            used for variables suffixed by "_FILE".
+          '';
+        }
       ]) cfg.certs));
 
       users.users.acme = {
@@ -875,12 +974,28 @@ in {
 
       users.groups.acme = {};
 
-      systemd.services = {
-        "acme-fixperms" = userMigrationService;
-      } // (mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewService) certConfigs)
+      # for the lock files, keep using tmpfiles, since they belong in /run
+      systemd.tmpfiles.rules = [
+        "d ${lockdir} 0700 ${user} - - -"
+        "Z ${lockdir} 0700 ${user} - - -"
+      ];
+
+      systemd.services = let
+        renewServiceFunctions = mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewService) certConfigs;
+        renewServices =  if cfg.maxConcurrentRenewals > 0
+          then roundRobinApplyAttrs renewServiceFunctions concurrencyLockfiles
+          else mapAttrs (_: f: f null) renewServiceFunctions;
+        selfsignServiceFunctions = mapAttrs' (cert: conf: nameValuePair "acme-selfsigned-${cert}" conf.selfsignService) certConfigs;
+        selfsignServices = if cfg.maxConcurrentRenewals > 0
+          then roundRobinApplyAttrs selfsignServiceFunctions concurrencyLockfiles
+          else mapAttrs (_: f: f null) selfsignServiceFunctions;
+        in
+        { "acme-fixperms" = userMigrationService; }
+        // (optionalAttrs (cfg.maxConcurrentRenewals > 0) {"acme-lockfiles" = lockfilePrepareService; })
+        // renewServices
         // (optionalAttrs (cfg.preliminarySelfsigned) ({
         "acme-selfsigned-ca" = selfsignCAService;
-      } // (mapAttrs' (cert: conf: nameValuePair "acme-selfsigned-${cert}" conf.selfsignService) certConfigs)));
+      } // selfsignServices));
 
       systemd.timers = mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewTimer) certConfigs;
 
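Pulling the new pieces together, a sketch of a certificate definition; the DNS provider, e-mail address and secret paths mirror the documentation examples above, while the concurrency cap is an arbitrary illustration:

  {
    security.acme = {
      acceptTerms = true;
      defaults.email = "admin+acme@example.com";
      # At most two renewal/selfsign jobs run at a time; 0 removes the cap.
      maxConcurrentRenewals = 2;
      certs."example.com" = {
        dnsProvider = "rfc2136";
        # Plain environment variables for lego (formerly credentialsFile).
        environmentFile = "/var/lib/secrets/certs.secret";
        # Secrets passed via systemd LoadCredential; names must end in "_FILE".
        credentialFiles."RFC2136_TSIG_SECRET_FILE" = "/run/secrets/tsig-secret-example.org";
      };
    };
  }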
diff --git a/nixos/modules/security/pam.nix b/nixos/modules/security/pam.nix
index a431817fe1bb..d83259ccbebc 100644
--- a/nixos/modules/security/pam.nix
+++ b/nixos/modules/security/pam.nix
@@ -1303,7 +1303,7 @@ in
 
     security.pam.enableEcryptfs = mkEnableOption (lib.mdDoc "eCryptfs PAM module (mounting ecryptfs home directory on login)");
     security.pam.enableFscrypt = mkEnableOption (lib.mdDoc ''
-      Enables fscrypt to automatically unlock directories with the user's login password.
+      fscrypt to automatically unlock directories with the user's login password.
 
       This also enables a service at security.pam.services.fscrypt which is used by
       fscrypt to verify the user's password when setting up a new protector. If you
diff --git a/nixos/modules/security/sudo-rs.nix b/nixos/modules/security/sudo-rs.nix
new file mode 100644
index 000000000000..6b8f09a8d3d0
--- /dev/null
+++ b/nixos/modules/security/sudo-rs.nix
@@ -0,0 +1,296 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (pkgs) sudo sudo-rs;
+
+  cfg = config.security.sudo-rs;
+
+  enableSSHAgentAuth =
+    with config.security;
+    pam.enableSSHAgentAuth && pam.sudo.sshAgentAuth;
+
+  usingMillersSudo = cfg.package.pname == sudo.pname;
+  usingSudoRs = cfg.package.pname == sudo-rs.pname;
+
+  toUserString = user: if (isInt user) then "#${toString user}" else "${user}";
+  toGroupString = group: if (isInt group) then "%#${toString group}" else "%${group}";
+
+  toCommandOptionsString = options:
+    "${concatStringsSep ":" options}${optionalString (length options != 0) ":"} ";
+
+  toCommandsString = commands:
+    concatStringsSep ", " (
+      map (command:
+        if (isString command) then
+          command
+        else
+          "${toCommandOptionsString command.options}${command.command}"
+      ) commands
+    );
+
+in
+
+{
+
+  ###### interface
+
+  options.security.sudo-rs = {
+
+    defaultOptions = mkOption {
+      type = with types; listOf str;
+      default = optional usingMillersSudo "SETENV";
+      defaultText = literalMD ''
+        `[ "SETENV" ]` if using the default `sudo` implementation
+      '';
+      description = mdDoc ''
+        Options used for the default rules, granting `root` and the
+        `wheel` group permission to run any command as any user.
+      '';
+    };
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Whether to enable the {command}`sudo` command, which
+        allows non-root users to execute commands as root.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.sudo-rs;
+      defaultText = literalExpression "pkgs.sudo-rs";
+      description = mdDoc ''
+        Which package to use for `sudo`.
+      '';
+    };
+
+    wheelNeedsPassword = mkOption {
+      type = types.bool;
+      default = true;
+      description = mdDoc ''
+        Whether users of the `wheel` group must
+        provide a password to run commands as super user via {command}`sudo`.
+      '';
+    };
+
+    execWheelOnly = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Only allow members of the `wheel` group to execute sudo by
+        setting the executable's permissions accordingly.
+        This prevents users that are not members of `wheel` from
+        exploiting vulnerabilities in sudo such as CVE-2021-3156.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.lines;
+      # Note: if syntax errors are detected in this file, the NixOS
+      # configuration will fail to build.
+      description = mdDoc ''
+        This string contains the contents of the
+        {file}`sudoers` file.
+      '';
+    };
+
+    extraRules = mkOption {
+      description = mdDoc ''
+        Define specific rules to be in the {file}`sudoers` file.
+        More specific rules should come after more general ones in order to
+        yield the expected behavior. You can use mkBefore/mkAfter to ensure
+        this is the case when configuration options are merged.
+      '';
+      default = [];
+      example = literalExpression ''
+        [
+          # Allow execution of any command by all users in group sudo,
+          # requiring a password.
+          { groups = [ "sudo" ]; commands = [ "ALL" ]; }
+
+          # Allow execution of "/home/root/secret.sh" by user `backup`, `database`
+          # and the group with GID `1006` without a password.
+          { users = [ "backup" "database" ]; groups = [ 1006 ];
+            commands = [ { command = "/home/root/secret.sh"; options = [ "SETENV" "NOPASSWD" ]; } ]; }
+
+          # Allow all users of group `bar` to run two executables as user `foo`
+          # with arguments being pre-set.
+          { groups = [ "bar" ]; runAs = "foo";
+            commands =
+              [ "/home/baz/cmd1.sh hello-sudo"
+                  { command = '''/home/baz/cmd2.sh ""'''; options = [ "SETENV" ]; } ]; }
+        ]
+      '';
+      type = with types; listOf (submodule {
+        options = {
+          users = mkOption {
+            type = with types; listOf (either str int);
+            description = mdDoc ''
+              The usernames / UIDs this rule should apply for.
+            '';
+            default = [];
+          };
+
+          groups = mkOption {
+            type = with types; listOf (either str int);
+            description = mdDoc ''
+              The groups / GIDs this rule should apply for.
+            '';
+            default = [];
+          };
+
+          host = mkOption {
+            type = types.str;
+            default = "ALL";
+            description = mdDoc ''
+              For what host this rule should apply.
+            '';
+          };
+
+          runAs = mkOption {
+            type = with types; str;
+            default = "ALL:ALL";
+            description = mdDoc ''
+              Under which user/group the specified command is allowed to run.
+
+              A user can be specified using just the username: `"foo"`.
+              It is also possible to specify a user/group combination using `"foo:bar"`
+              or to only allow running as a specific group with `":bar"`.
+            '';
+          };
+
+          commands = mkOption {
+            description = mdDoc ''
+              The commands for which the rule should apply.
+            '';
+            type = with types; listOf (either str (submodule {
+
+              options = {
+                command = mkOption {
+                  type = with types; str;
+                  description = mdDoc ''
+                    A command being either just a path to a binary to allow any arguments,
+                    the full command with arguments pre-set or with `""` used as the argument,
+                    not allowing arguments to the command at all.
+                  '';
+                };
+
+                options = mkOption {
+                  type = with types; listOf (enum [ "NOPASSWD" "PASSWD" "NOEXEC" "EXEC" "SETENV" "NOSETENV" "LOG_INPUT" "NOLOG_INPUT" "LOG_OUTPUT" "NOLOG_OUTPUT" ]);
+                  description = mdDoc ''
+                    Options for running the command. Refer to the [sudo manual](https://www.sudo.ws/man/1.7.10/sudoers.man.html).
+                  '';
+                  default = [];
+                };
+              };
+
+            }));
+          };
+        };
+      });
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = mdDoc ''
+        Extra configuration text appended to {file}`sudoers`.
+      '';
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    security.sudo-rs.extraRules =
+      let
+        defaultRule = { users ? [], groups ? [], opts ? [] }: [ {
+          inherit users groups;
+          commands = [ {
+            command = "ALL";
+            options = opts ++ cfg.defaultOptions;
+          } ];
+        } ];
+      in mkMerge [
+        # This is ordered before users' `mkBefore` rules,
+        # so as not to introduce unexpected changes.
+        (mkOrder 400 (defaultRule { users = [ "root" ]; }))
+
+        # This is ordered to show before (most) other rules, but
+        # late-enough for a user to `mkBefore` it.
+        (mkOrder 600 (defaultRule {
+          groups = [ "wheel" ];
+          opts = (optional (!cfg.wheelNeedsPassword) "NOPASSWD");
+        }))
+      ];
+
+    security.sudo-rs.configFile = concatStringsSep "\n" (filter (s: s != "") [
+      ''
+        # Don't edit this file. Set the NixOS options ‘security.sudo-rs.configFile’
+        # or ‘security.sudo-rs.extraRules’ instead.
+      ''
+      (optionalString enableSSHAgentAuth ''
+        # Keep SSH_AUTH_SOCK so that pam_ssh_agent_auth.so can do its magic.
+        Defaults env_keep+=SSH_AUTH_SOCK
+      '')
+      (concatStringsSep "\n" (
+        lists.flatten (
+          map (
+            rule: optionals (length rule.commands != 0) [
+              (map (user: "${toUserString user}	${rule.host}=(${rule.runAs})	${toCommandsString rule.commands}") rule.users)
+              (map (group: "${toGroupString group}	${rule.host}=(${rule.runAs})	${toCommandsString rule.commands}") rule.groups)
+            ]
+          ) cfg.extraRules
+        )
+      ) + "\n")
+      (optionalString (cfg.extraConfig != "") ''
+        # extraConfig
+        ${cfg.extraConfig}
+      '')
+    ]);
+
+    security.wrappers = let
+      owner = "root";
+      group = if cfg.execWheelOnly then "wheel" else "root";
+      setuid = true;
+      permissions = if cfg.execWheelOnly then "u+rx,g+x" else "u+rx,g+x,o+x";
+    in {
+      sudo = {
+        source = "${cfg.package.out}/bin/sudo";
+        inherit owner group setuid permissions;
+      };
+      # sudo-rs does not yet ship a sudoedit (as of v0.2.0)
+      sudoedit = mkIf usingMillersSudo {
+        source = "${cfg.package.out}/bin/sudoedit";
+        inherit owner group setuid permissions;
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    security.pam.services.sudo = { sshAgentAuth = true; usshAuth = true; };
+    security.pam.services.sudo-i = mkIf usingSudoRs
+      { sshAgentAuth = true; usshAuth = true; };
+
+    environment.etc.sudoers =
+      { source =
+          pkgs.runCommand "sudoers"
+          {
+            src = pkgs.writeText "sudoers-in" cfg.configFile;
+            preferLocalBuild = true;
+          }
+          "${pkgs.buildPackages."${cfg.package.pname}"}/bin/visudo -f $src -c && cp $src $out";
+        mode = "0440";
+      };
+
+  };
+
+  meta.maintainers = [ lib.maintainers.nicoo ];
+
+}
diff --git a/nixos/modules/security/sudo.nix b/nixos/modules/security/sudo.nix
index 9ac91bd0d368..d225442773c6 100644
--- a/nixos/modules/security/sudo.nix
+++ b/nixos/modules/security/sudo.nix
@@ -192,6 +192,10 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.package.pname != "sudo-rs";
+        message = "The NixOS `sudo` module does not work with `sudo-rs` yet."; }
+    ];
 
     # We `mkOrder 600` so that the default rule shows up first, but there is
     # still enough room for a user to `mkBefore` it.
diff --git a/nixos/modules/security/wrappers/default.nix b/nixos/modules/security/wrappers/default.nix
index 12255d8392fe..ad65f80bb2ca 100644
--- a/nixos/modules/security/wrappers/default.nix
+++ b/nixos/modules/security/wrappers/default.nix
@@ -5,8 +5,8 @@ let
 
   parentWrapperDir = dirOf wrapperDir;
 
-  securityWrapper = pkgs.callPackage ./wrapper.nix {
-    inherit parentWrapperDir;
+  securityWrapper = sourceProg : pkgs.callPackage ./wrapper.nix {
+    inherit sourceProg;
   };
 
   fileModeType =
@@ -91,8 +91,7 @@ let
     , ...
     }:
     ''
-      cp ${securityWrapper}/bin/security-wrapper "$wrapperDir/${program}"
-      echo -n "${source}" > "$wrapperDir/${program}.real"
+      cp ${securityWrapper source}/bin/security-wrapper "$wrapperDir/${program}"
 
       # Prevent races
       chmod 0000 "$wrapperDir/${program}"
@@ -119,8 +118,7 @@ let
     , ...
     }:
     ''
-      cp ${securityWrapper}/bin/security-wrapper "$wrapperDir/${program}"
-      echo -n "${source}" > "$wrapperDir/${program}.real"
+      cp ${securityWrapper source}/bin/security-wrapper "$wrapperDir/${program}"
 
       # Prevent races
       chmod 0000 "$wrapperDir/${program}"
@@ -248,11 +246,13 @@ in
       export PATH="${wrapperDir}:$PATH"
     '';
 
-    security.apparmor.includes."nixos/security.wrappers" = ''
-      include "${pkgs.apparmorRulesFromClosure { name="security.wrappers"; } [
-        securityWrapper
+    security.apparmor.includes = lib.mapAttrs' (wrapName: wrap: lib.nameValuePair
+     "nixos/security.wrappers/${wrapName}" ''
+      include "${pkgs.apparmorRulesFromClosure { name="security.wrappers.${wrapName}"; } [
+        (securityWrapper wrap.source)
       ]}"
-    '';
+      mrpx ${wrap.source},
+    '') wrappers;
 
     ###### wrappers activation script
     system.activationScripts.wrappers =
diff --git a/nixos/modules/security/wrappers/wrapper.c b/nixos/modules/security/wrappers/wrapper.c
index 17776a97af81..2cf1727a31c8 100644
--- a/nixos/modules/security/wrappers/wrapper.c
+++ b/nixos/modules/security/wrappers/wrapper.c
@@ -17,6 +17,10 @@
 #include <syscall.h>
 #include <byteswap.h>
 
+#ifndef SOURCE_PROG
+#error SOURCE_PROG should be defined via preprocessor commandline
+#endif
+
 // aborts when false, printing the failed expression
 #define ASSERT(expr) ((expr) ? (void) 0 : assert_failure(#expr))
 // aborts when returns non-zero, printing the failed expression and errno
@@ -24,10 +28,6 @@
 
 extern char **environ;
 
-// The WRAPPER_DIR macro is supplied at compile time so that it cannot
-// be changed at runtime
-static char *wrapper_dir = WRAPPER_DIR;
-
 // Wrapper debug variable name
 static char *wrapper_debug = "WRAPPER_DEBUG";
 
@@ -151,115 +151,20 @@ static int make_caps_ambient(const char *self_path) {
     return 0;
 }
 
-int readlink_malloc(const char *p, char **ret) {
-    size_t l = FILENAME_MAX+1;
-    int r;
-
-    for (;;) {
-        char *c = calloc(l, sizeof(char));
-        if (!c) {
-            return -ENOMEM;
-        }
-
-        ssize_t n = readlink(p, c, l-1);
-        if (n < 0) {
-            r = -errno;
-            free(c);
-            return r;
-        }
-
-        if ((size_t) n < l-1) {
-            c[n] = 0;
-            *ret = c;
-            return 0;
-        }
-
-        free(c);
-        l *= 2;
-    }
-}
-
 int main(int argc, char **argv) {
     ASSERT(argc >= 1);
-    char *self_path = NULL;
-    int self_path_size = readlink_malloc("/proc/self/exe", &self_path);
-    if (self_path_size < 0) {
-        fprintf(stderr, "cannot readlink /proc/self/exe: %s", strerror(-self_path_size));
-    }
-
-    unsigned int ruid, euid, suid, rgid, egid, sgid;
-    MUSTSUCCEED(getresuid(&ruid, &euid, &suid));
-    MUSTSUCCEED(getresgid(&rgid, &egid, &sgid));
-
-    // If true, then we did not benefit from setuid privilege escalation,
-    // where the original uid is still in ruid and different from euid == suid.
-    int didnt_suid = (ruid == euid) && (euid == suid);
-    // If true, then we did not benefit from setgid privilege escalation
-    int didnt_sgid = (rgid == egid) && (egid == sgid);
-
-
-    // Make sure that we are being executed from the right location,
-    // i.e., `safe_wrapper_dir'.  This is to prevent someone from creating
-    // hard link `X' from some other location, along with a false
-    // `X.real' file, to allow arbitrary programs from being executed
-    // with elevated capabilities.
-    int len = strlen(wrapper_dir);
-    if (len > 0 && '/' == wrapper_dir[len - 1])
-      --len;
-    ASSERT(!strncmp(self_path, wrapper_dir, len));
-    ASSERT('/' == wrapper_dir[0]);
-    ASSERT('/' == self_path[len]);
-
-    // If we got privileges with the fs set[ug]id bit, check that the privilege we
-    // got matches the one one we expected, ie that our effective uid/gid
-    // matches the uid/gid of `self_path`. This ensures that we were executed as
-    // `self_path', and not, say, as some other setuid program.
-    // We don't check that if we did not benefit from the set[ug]id bit, as
-    // can be the case in nosuid mounts or user namespaces.
-    struct stat st;
-    ASSERT(lstat(self_path, &st) != -1);
-
-    // if the wrapper gained privilege with suid, check that we got the uid of the file owner
-    ASSERT(!((st.st_mode & S_ISUID) && !didnt_suid) || (st.st_uid == euid));
-    // if the wrapper gained privilege with sgid, check that we got the gid of the file group
-    ASSERT(!((st.st_mode & S_ISGID) && !didnt_sgid) || (st.st_gid == egid));
-    // same, but with suid instead of euid
-    ASSERT(!((st.st_mode & S_ISUID) && !didnt_suid) || (st.st_uid == suid));
-    ASSERT(!((st.st_mode & S_ISGID) && !didnt_sgid) || (st.st_gid == sgid));
-
-    // And, of course, we shouldn't be writable.
-    ASSERT(!(st.st_mode & (S_IWGRP | S_IWOTH)));
-
-    // Read the path of the real (wrapped) program from <self>.real.
-    char real_fn[PATH_MAX + 10];
-    int real_fn_size = snprintf(real_fn, sizeof(real_fn), "%s.real", self_path);
-    ASSERT(real_fn_size < sizeof(real_fn));
-
-    int fd_self = open(real_fn, O_RDONLY);
-    ASSERT(fd_self != -1);
-
-    char source_prog[PATH_MAX];
-    len = read(fd_self, source_prog, PATH_MAX);
-    ASSERT(len != -1);
-    ASSERT(len < sizeof(source_prog));
-    ASSERT(len > 0);
-    source_prog[len] = 0;
-
-    close(fd_self);
 
     // Read the capabilities set on the wrapper and raise them in to
     // the ambient set so the program we're wrapping receives the
     // capabilities too!
-    if (make_caps_ambient(self_path) != 0) {
-        free(self_path);
+    if (make_caps_ambient("/proc/self/exe") != 0) {
         return 1;
     }
-    free(self_path);
 
-    execve(source_prog, argv, environ);
+    execve(SOURCE_PROG, argv, environ);
     
     fprintf(stderr, "%s: cannot run `%s': %s\n",
-        argv[0], source_prog, strerror(errno));
+        argv[0], SOURCE_PROG, strerror(errno));
 
     return 1;
 }
diff --git a/nixos/modules/security/wrappers/wrapper.nix b/nixos/modules/security/wrappers/wrapper.nix
index e3620fb222d2..aec43412404e 100644
--- a/nixos/modules/security/wrappers/wrapper.nix
+++ b/nixos/modules/security/wrappers/wrapper.nix
@@ -1,4 +1,4 @@
-{ stdenv, linuxHeaders, parentWrapperDir, debug ? false }:
+{ stdenv, linuxHeaders, sourceProg, debug ? false }:
 # For testing:
 # $ nix-build -E 'with import <nixpkgs> {}; pkgs.callPackage ./wrapper.nix { parentWrapperDir = "/run/wrappers"; debug = true; }'
 stdenv.mkDerivation {
@@ -7,7 +7,7 @@ stdenv.mkDerivation {
   dontUnpack = true;
   hardeningEnable = [ "pie" ];
   CFLAGS = [
-    ''-DWRAPPER_DIR="${parentWrapperDir}"''
+    ''-DSOURCE_PROG="${sourceProg}"''
   ] ++ (if debug then [
     "-Werror" "-Og" "-g"
   ] else [
diff --git a/nixos/modules/services/audio/tts.nix b/nixos/modules/services/audio/tts.nix
index 1a355c8ee39f..0d93224ec030 100644
--- a/nixos/modules/services/audio/tts.nix
+++ b/nixos/modules/services/audio/tts.nix
@@ -134,6 +134,7 @@ in
           ProtectProc = "invisible";
           ProcSubset = "pid";
           RestrictAddressFamilies = [
+            "AF_UNIX"
             "AF_INET"
             "AF_INET6"
           ];
diff --git a/nixos/modules/services/backup/duplicati.nix b/nixos/modules/services/backup/duplicati.nix
index 007396ebfc9b..9b422635e7f0 100644
--- a/nixos/modules/services/backup/duplicati.nix
+++ b/nixos/modules/services/backup/duplicati.nix
@@ -10,6 +10,8 @@ in
     services.duplicati = {
       enable = mkEnableOption (lib.mdDoc "Duplicati");
 
+      package = mkPackageOptionMD pkgs "duplicati" { };
+
       port = mkOption {
         default = 8200;
         type = types.port;
@@ -53,7 +55,7 @@ in
   };
 
   config = mkIf cfg.enable {
-    environment.systemPackages = [ pkgs.duplicati ];
+    environment.systemPackages = [ cfg.package ];
 
     systemd.services.duplicati = {
       description = "Duplicati backup";
@@ -63,7 +65,7 @@ in
         {
           User = cfg.user;
           Group = "duplicati";
-          ExecStart = "${pkgs.duplicati}/bin/duplicati-server --webservice-interface=${cfg.interface} --webservice-port=${toString cfg.port} --server-datafolder=${cfg.dataDir}";
+          ExecStart = "${cfg.package}/bin/duplicati-server --webservice-interface=${cfg.interface} --webservice-port=${toString cfg.port} --server-datafolder=${cfg.dataDir}";
           Restart = "on-failure";
         }
         (mkIf (cfg.dataDir == "/var/lib/duplicati") {
@@ -83,4 +85,3 @@ in
 
   };
 }
-
diff --git a/nixos/modules/services/backup/restic.nix b/nixos/modules/services/backup/restic.nix
index 1620770e5b56..78220e99c3d1 100644
--- a/nixos/modules/services/backup/restic.nix
+++ b/nixos/modules/services/backup/restic.nix
@@ -260,6 +260,16 @@ in
             Restic package to use.
           '';
         };
+
+        createWrapper = lib.mkOption {
+          type = lib.types.bool;
+          default = true;
+          description = ''
+            Whether to generate and add a script to the system path that has the same environment variables set
+            as the systemd service. This can be used to e.g. mount snapshots or perform other operations, without
+            having to manually specify most options.
+          '';
+        };
       };
     }));
     default = { };
@@ -316,7 +326,8 @@ in
           in
           nameValuePair "restic-backups-${name}" ({
             environment = {
-              RESTIC_CACHE_DIR = "%C/restic-backups-${name}";
+              # not %C, because that wouldn't work in the wrapper script
+              RESTIC_CACHE_DIR = "/var/cache/restic-backups-${name}";
               RESTIC_PASSWORD_FILE = backup.passwordFile;
               RESTIC_REPOSITORY = backup.repository;
               RESTIC_REPOSITORY_FILE = backup.repositoryFile;
@@ -331,8 +342,10 @@ in
                 nameValuePair (rcloneAttrToConf name) (toRcloneVal value)
               )
               backup.rcloneConfig);
-            path = [ pkgs.openssh ];
+            path = [ config.programs.ssh.package ];
             restartIfChanged = false;
+            wants = [ "network-online.target" ];
+            after = [ "network-online.target" ];
             serviceConfig = {
               Type = "oneshot";
               ExecStart = (optionals (backupPaths != "") [ "${resticCmd} backup ${concatStringsSep " " (backup.extraBackupArgs ++ excludeFlags)} ${backupPaths}" ])
@@ -376,5 +389,22 @@ in
           timerConfig = backup.timerConfig;
         })
         config.services.restic.backups;
+
+    # generate wrapper scripts, as described in the createWrapper option
+    environment.systemPackages = lib.mapAttrsToList (name: backup: let
+      extraOptions = lib.concatMapStrings (arg: " -o ${arg}") backup.extraOptions;
+      resticCmd = "${backup.package}/bin/restic${extraOptions}";
+    in pkgs.writeShellScriptBin "restic-${name}" ''
+      set -a  # automatically export variables
+      ${lib.optionalString (backup.environmentFile != null) "source ${backup.environmentFile}"}
+      # set same environment variables as the systemd service
+      ${lib.pipe config.systemd.services."restic-backups-${name}".environment [
+        (lib.filterAttrs (_: v: v != null))
+        (lib.mapAttrsToList (n: v: "${n}=${v}"))
+        (lib.concatStringsSep "\n")
+      ]}
+
+      exec ${resticCmd} "$@"
+    '') (lib.filterAttrs (_: v: v.createWrapper) config.services.restic.backups);
   };
 }
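For illustration, a backup definition that relies on the wrapper; the backup name, repository and paths are placeholders:

  {
    services.restic.backups.home = {
      repository = "/srv/restic-repo";
      passwordFile = "/etc/nixos/secrets/restic-password";
      paths = [ "/home" ];
      createWrapper = true; # default; installs a `restic-home` script
    };
  }

With the wrapper in place, a command such as `restic-home snapshots` is expected to run restic against the same repository, password file and cache directory as the systemd unit.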
diff --git a/nixos/modules/services/continuous-integration/buildbot/master.nix b/nixos/modules/services/continuous-integration/buildbot/master.nix
index 595374ea1e5b..b4b997201c8f 100644
--- a/nixos/modules/services/continuous-integration/buildbot/master.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/master.nix
@@ -272,7 +272,13 @@ in {
         Group = cfg.group;
         WorkingDirectory = cfg.home;
         # NOTE: call twistd directly with stdout logging for systemd
-        ExecStart = "${python.pkgs.twisted}/bin/twistd -o --nodaemon --pidfile= --logfile - --python ${tacFile}";
+        ExecStart = "${python.pkgs.twisted}/bin/twistd -o --nodaemon --pidfile= --logfile - --python ${cfg.buildbotDir}/buildbot.tac";
+        # To reload on upgrade, set the following in your configuration:
+        # systemd.services.buildbot-master.reloadIfChanged = true;
+        ExecReload = [
+          "${pkgs.coreutils}/bin/ln -sf ${tacFile} ${cfg.buildbotDir}/buildbot.tac"
+          "${pkgs.coreutils}/bin/kill -HUP $MAINPID"
+        ];
       };
     };
   };
diff --git a/nixos/modules/services/continuous-integration/github-runner/options.nix b/nixos/modules/services/continuous-integration/github-runner/options.nix
index ce8809213724..f2887c7711b3 100644
--- a/nixos/modules/services/continuous-integration/github-runner/options.nix
+++ b/nixos/modules/services/continuous-integration/github-runner/options.nix
@@ -208,4 +208,12 @@ with lib;
     '';
     default = null;
   };
+
+  nodeRuntimes = mkOption {
+    type = with types; nonEmptyListOf (enum [ "node16" "node20" ]);
+    default = [ "node20" ];
+    description = mdDoc ''
+      List of Node.js runtimes the runner should support.
+    '';
+  };
 }
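Assuming the runner is declared under `services.github-runners.<name>` (the name, URL and token path below are placeholders):

  {
    services.github-runners.ci = {
      enable = true;
      url = "https://github.com/example-org/example-repo";
      tokenFile = "/run/secrets/github-runner-token";
      # Only ship the Node.js 20 runtime to actions; "node16" could be added if needed.
      nodeRuntimes = [ "node20" ];
    };
  }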
diff --git a/nixos/modules/services/continuous-integration/github-runner/service.nix b/nixos/modules/services/continuous-integration/github-runner/service.nix
index 55df83362cb6..535df7f68e07 100644
--- a/nixos/modules/services/continuous-integration/github-runner/service.nix
+++ b/nixos/modules/services/continuous-integration/github-runner/service.nix
@@ -22,6 +22,7 @@ with lib;
 
 let
   workDir = if cfg.workDir == null then runtimeDir else cfg.workDir;
+  package = cfg.package.override { inherit (cfg) nodeRuntimes; };
 in
 {
   description = "GitHub Actions runner";
@@ -47,7 +48,7 @@ in
 
   serviceConfig = mkMerge [
     {
-      ExecStart = "${cfg.package}/bin/Runner.Listener run --startuptype service";
+      ExecStart = "${package}/bin/Runner.Listener run --startuptype service";
 
       # Does the following, sequentially:
       # - If the module configuration or the token has changed, purge the state directory,
@@ -149,7 +150,7 @@ in
               else
                 args+=(--token "$token")
               fi
-              ${cfg.package}/bin/Runner.Listener configure "''${args[@]}"
+              ${package}/bin/Runner.Listener configure "''${args[@]}"
               # Move the automatically created _diag dir to the logs dir
               mkdir -p  "$STATE_DIRECTORY/_diag"
               cp    -r  "$STATE_DIRECTORY/_diag/." "$LOGS_DIRECTORY/"
diff --git a/nixos/modules/services/continuous-integration/hail.nix b/nixos/modules/services/continuous-integration/hail.nix
deleted file mode 100644
index 62e8b8077c07..000000000000
--- a/nixos/modules/services/continuous-integration/hail.nix
+++ /dev/null
@@ -1,61 +0,0 @@
-{ config, lib, pkgs, ...}:
-
-with lib;
-
-let
-  cfg = config.services.hail;
-in {
-
-
-  ###### interface
-
-  options.services.hail = {
-    enable = mkOption {
-      type = types.bool;
-      default = false;
-      description = lib.mdDoc ''
-        Enables the Hail Auto Update Service. Hail can automatically deploy artifacts
-        built by a Hydra Continuous Integration server. A common use case is to provide
-        continuous deployment for single services or a full NixOS configuration.'';
-    };
-    profile = mkOption {
-      type = types.str;
-      default = "hail-profile";
-      description = lib.mdDoc "The name of the Nix profile used by Hail.";
-    };
-    hydraJobUri = mkOption {
-      type = types.str;
-      description = lib.mdDoc "The URI of the Hydra Job.";
-    };
-    netrc = mkOption {
-      type = types.nullOr types.path;
-      description = lib.mdDoc "The netrc file to use when fetching data from Hydra.";
-      default = null;
-    };
-    package = mkOption {
-      type = types.package;
-      default = pkgs.haskellPackages.hail;
-      defaultText = literalExpression "pkgs.haskellPackages.hail";
-      description = lib.mdDoc "Hail package to use.";
-    };
-  };
-
-
-  ###### implementation
-
-  config = mkIf cfg.enable {
-    systemd.services.hail = {
-      description = "Hail Auto Update Service";
-      wants = [ "network-online.target" ];
-      wantedBy = [ "multi-user.target" ];
-      path = with pkgs; [ nix ];
-      environment = {
-        HOME = "/var/lib/empty";
-      };
-      serviceConfig = {
-        ExecStart = "${cfg.package}/bin/hail --profile ${cfg.profile} --job-uri ${cfg.hydraJobUri}"
-          + lib.optionalString (cfg.netrc != null) " --netrc-file ${cfg.netrc}";
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/continuous-integration/woodpecker/agents.nix b/nixos/modules/services/continuous-integration/woodpecker/agents.nix
index cc5b903afd59..3b883c72ff07 100644
--- a/nixos/modules/services/continuous-integration/woodpecker/agents.nix
+++ b/nixos/modules/services/continuous-integration/woodpecker/agents.nix
@@ -35,6 +35,16 @@ let
         '';
       };
 
+      path = lib.mkOption {
+        type = lib.types.listOf lib.types.package;
+        default = [ ];
+        example = [ "" ];
+        description = lib.mdDoc ''
+          Additional packages that should be added to the agent's `PATH`.
+          Mostly useful for the `local` backend.
+        '';
+      };
+
       environmentFile = lib.mkOption {
         type = lib.types.listOf lib.types.path;
         default = [ ];
@@ -94,7 +104,7 @@ let
           "-/etc/localtime"
         ];
       };
-      inherit (agentCfg) environment;
+      inherit (agentCfg) environment path;
     };
   };
 in
@@ -106,28 +116,41 @@ in
       agents = lib.mkOption {
         default = { };
         type = lib.types.attrsOf agentModule;
-        example = {
-          docker = {
-            environment = {
-              WOODPECKER_SERVER = "localhost:9000";
-              WOODPECKER_BACKEND = "docker";
-              DOCKER_HOST = "unix:///run/podman/podman.sock";
+        example = lib.literalExpression ''
+          {
+            podman = {
+              environment = {
+                WOODPECKER_SERVER = "localhost:9000";
+                WOODPECKER_BACKEND = "docker";
+                DOCKER_HOST = "unix:///run/podman/podman.sock";
+              };
+
+              extraGroups = [ "podman" ];
+
+              environmentFile = [ "/run/secrets/woodpecker/agent-secret.txt" ];
             };
 
-            extraGroups = [ "docker" ];
+            exec = {
+              environment = {
+                WOODPECKER_SERVER = "localhost:9000";
+                WOODPECKER_BACKEND = "local";
+              };
 
-            environmentFile = "/run/secrets/woodpecker/agent-secret.txt";
-          };
+              environmentFile = [ "/run/secrets/woodpecker/agent-secret.txt" ];
 
-          exec = {
-            environment = {
-              WOODPECKER_SERVER = "localhost:9000";
-              WOODPECKER_BACKEND = "exec";
+              path = [
+                # Needed to clone repos
+                git
+                git-lfs
+                woodpecker-plugin-git
+                # Used by the runner as the default shell
+                bash
+                # Most likely to be used in pipeline definitions
+                coreutils
+              ];
             };
-
-            environmentFile = "/run/secrets/woodpecker/agent-secret.txt";
-          };
-        };
+          }
+        '';
         description = lib.mdDoc "woodpecker-agents configurations";
       };
     };
diff --git a/nixos/modules/services/databases/surrealdb.nix b/nixos/modules/services/databases/surrealdb.nix
index 050a5336cb4c..28bd97cd731e 100644
--- a/nixos/modules/services/databases/surrealdb.nix
+++ b/nixos/modules/services/databases/surrealdb.nix
@@ -47,17 +47,13 @@ in {
         example = 8000;
       };
 
-      userNamePath = mkOption {
-        type = types.path;
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--allow-all" "--auth" "--user root" "--pass root" ];
         description = lib.mdDoc ''
-          Path to read the username from.
-        '';
-      };
-
-      passwordPath = mkOption {
-        type = types.path;
-        description = lib.mdDoc ''
-          Path to read the password from.
+          Specify a list of additional command line flags,
+          which are shell-escaped and then passed to SurrealDB.
         '';
       };
     };
@@ -73,19 +69,8 @@ in {
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
 
-      script = ''
-        ${cfg.package}/bin/surreal start \
-          --user $(${pkgs.systemd}/bin/systemd-creds cat SURREALDB_USERNAME) \
-          --pass $(${pkgs.systemd}/bin/systemd-creds cat SURREALDB_PASSWORD) \
-          --bind ${cfg.host}:${toString cfg.port} \
-          -- ${cfg.dbPath}
-      '';
       serviceConfig = {
-        LoadCredential = [
-          "SURREALDB_USERNAME:${cfg.userNamePath}"
-          "SURREALDB_PASSWORD:${cfg.passwordPath}"
-        ];
-
+        ExecStart = "${cfg.package}/bin/surreal start --bind ${cfg.host}:${toString cfg.port} ${escapeShellArgs cfg.extraFlags} -- ${cfg.dbPath}";
         DynamicUser = true;
         Restart = "on-failure";
         StateDirectory = "surrealdb";
diff --git a/nixos/modules/services/editors/emacs.md b/nixos/modules/services/editors/emacs.md
index 72364b295144..9db1bd594175 100644
--- a/nixos/modules/services/editors/emacs.md
+++ b/nixos/modules/services/editors/emacs.md
@@ -286,11 +286,11 @@ The server should now be ready to serve Emacs clients.
 
 ### Starting the client {#module-services-emacs-starting-client}
 
-Ensure that the emacs server is enabled, either by customizing the
+Ensure that the Emacs server is enabled, either by customizing the
 {var}`server-mode` variable, or by adding
 `(server-start)` to {file}`~/.emacs`.
 
-To connect to the emacs daemon, run one of the following:
+To connect to the Emacs daemon, run one of the following:
 ```
 emacsclient FILENAME
 emacsclient --create-frame  # opens a new frame (window)
@@ -339,24 +339,10 @@ This will add the symlink
 
 ## Configuring Emacs {#module-services-emacs-configuring}
 
-The Emacs init file should be changed to load the extension packages at
-startup:
+If you want to only use extension packages from Nixpkgs, you can add
+`(setq package-archives nil)` to your init file.
 
-::: {.example #module-services-emacs-package-initialisation}
-### Package initialization in `.emacs`
-
-```
-(require 'package)
-
-;; optional. makes unpure packages archives unavailable
-(setq package-archives nil)
-
-(setq package-enable-at-startup nil)
-(package-initialize)
-```
-:::
-
-After the declarative emacs package configuration has been tested,
+After the declarative Emacs package configuration has been tested,
 previously downloaded packages can be cleaned up by removing
 {file}`~/.emacs.d/elpa` (do make a backup first, in case you
 forgot a package).
diff --git a/nixos/modules/services/finance/odoo.nix b/nixos/modules/services/finance/odoo.nix
index fee9af574b5d..eec7c4e30cc4 100644
--- a/nixos/modules/services/finance/odoo.nix
+++ b/nixos/modules/services/finance/odoo.nix
@@ -31,6 +31,12 @@ in
         description = lib.mdDoc ''
           Odoo configuration settings. For more details see <https://www.odoo.com/documentation/15.0/administration/install/deploy.html>
         '';
+        example = literalExpression ''
+          options = {
+            db_user = "odoo";
+            db_password="odoo";
+          };
+        '';
       };
 
       domain = mkOption {
@@ -112,11 +118,11 @@ in
     services.postgresql = {
       enable = true;
 
+      ensureDatabases = [ "odoo" ];
       ensureUsers = [{
         name = "odoo";
         ensurePermissions = { "DATABASE odoo" = "ALL PRIVILEGES"; };
       }];
-      ensureDatabases = [ "odoo" ];
     };
   });
 }
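
A short sketch expanding the example added above into a full service stanza; the domain is a placeholder and its use for the reverse proxy is an assumption about this module:

```nix
{
  services.odoo = {
    enable = true;
    domain = "odoo.example.com";   # placeholder
    settings.options = {
      db_user = "odoo";
      db_password = "odoo";
    };
  };
}
```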
diff --git a/nixos/modules/services/games/xonotic.nix b/nixos/modules/services/games/xonotic.nix
new file mode 100644
index 000000000000..c84347ddc981
--- /dev/null
+++ b/nixos/modules/services/games/xonotic.nix
@@ -0,0 +1,198 @@
+{ config
+, pkgs
+, lib
+, ...
+}:
+
+let
+  cfg = config.services.xonotic;
+
+  serverCfg = pkgs.writeText "xonotic-server.cfg" (
+    toString cfg.prependConfig
+      + "\n"
+      + builtins.concatStringsSep "\n" (
+        lib.mapAttrsToList (key: option:
+          let
+            escape = s: lib.escape [ "\"" ] s;
+            quote = s: "\"${s}\"";
+
+            toValue = x: quote (escape (toString x));
+
+            value = (if lib.isList option then
+              builtins.concatStringsSep
+                " "
+                (builtins.map (x: toValue x) option)
+            else
+              toValue option
+            );
+          in
+          "${key} ${value}"
+        ) cfg.settings
+      )
+      + "\n"
+      + toString cfg.appendConfig
+  );
+in
+
+{
+  options.services.xonotic = {
+    enable = lib.mkEnableOption (lib.mdDoc "Xonotic dedicated server");
+
+    package = lib.mkPackageOption pkgs "xonotic-dedicated" {};
+
+    openFirewall = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open the firewall for TCP and UDP on the specified port.
+      '';
+    };
+
+    dataDir = lib.mkOption {
+      type = lib.types.path;
+      readOnly = true;
+      default = "/var/lib/xonotic";
+      description = lib.mdDoc ''
+        Data directory.
+      '';
+    };
+
+    settings = lib.mkOption {
+      description = lib.mdDoc ''
+        Generates the `server.cfg` file. Refer to [upstream's example][0] for
+        details.
+
+        [0]: https://gitlab.com/xonotic/xonotic/-/blob/master/server/server.cfg
+      '';
+      default = {};
+      type = lib.types.submodule {
+        freeformType = with lib.types; let
+          scalars = oneOf [ singleLineStr int float ];
+        in
+        attrsOf (oneOf [ scalars (nonEmptyListOf scalars) ]);
+
+        options.sv_public = lib.mkOption {
+          type = lib.types.int;
+          default = 0;
+          example = [ (-1) 1 ];
+          description = lib.mdDoc ''
+            Controls whether the server will be publicly listed.
+          '';
+        };
+
+        options.hostname = lib.mkOption {
+          type = lib.types.singleLineStr;
+          default = "Xonotic $g_xonoticversion Server";
+          description = lib.mdDoc ''
+            The name that will appear in the server list. `$g_xonoticversion`
+            gets replaced with the current version.
+          '';
+        };
+
+        options.sv_motd = lib.mkOption {
+          type = lib.types.singleLineStr;
+          default = "";
+          description = lib.mdDoc ''
+            Text displayed when players join the server.
+          '';
+        };
+
+        options.sv_termsofservice_url = lib.mkOption {
+          type = lib.types.singleLineStr;
+          default = "";
+          description = lib.mdDoc ''
+            URL for the Terms of Service for playing on your server.
+          '';
+        };
+
+        options.maxplayers = lib.mkOption {
+          type = lib.types.int;
+          default = 16;
+          description = lib.mdDoc ''
+            Number of player slots on the server, including spectators.
+          '';
+        };
+
+        options.net_address = lib.mkOption {
+          type = lib.types.singleLineStr;
+          default = "0.0.0.0";
+          description = lib.mdDoc ''
+            The address Xonotic will listen on.
+          '';
+        };
+
+        options.port = lib.mkOption {
+          type = lib.types.port;
+          default = 26000;
+          description = lib.mdDoc ''
+            The port Xonotic will listen on.
+          '';
+        };
+      };
+    };
+
+    # Still useful even though we're using RFC 42 settings because *some* keys
+    # can be repeated.
+    appendConfig = lib.mkOption {
+      type = with lib.types; nullOr lines;
+      default = null;
+      description = lib.mdDoc ''
+        Literal text to insert at the end of `server.cfg`.
+      '';
+    };
+
+    # Certain changes need to happen at the beginning of the file.
+    prependConfig = lib.mkOption {
+      type = with lib.types; nullOr lines;
+      default = null;
+      description = lib.mdDoc ''
+        Literal text to insert at the start of `server.cfg`.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.xonotic = {
+      description = "Xonotic server";
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        # Required or else it tries to write the lock file into the nix store
+        HOME = cfg.dataDir;
+      };
+
+      serviceConfig = {
+        DynamicUser = true;
+        User = "xonotic";
+        StateDirectory = "xonotic";
+        ExecStart = "${cfg.package}/bin/xonotic-dedicated";
+
+        # Symlink the configuration from the nix store to where Xonotic actually
+        # looks for it
+        ExecStartPre = [
+          "${pkgs.coreutils}/bin/mkdir -p ${cfg.dataDir}/.xonotic/data"
+          ''
+            ${pkgs.coreutils}/bin/ln -sf ${serverCfg} \
+              ${cfg.dataDir}/.xonotic/data/server.cfg
+          ''
+        ];
+
+        # Cargo-culted from search results about writing Xonotic systemd units
+        ExecReload = "${pkgs.util-linux}/bin/kill -HUP $MAINPID";
+
+        Restart = "on-failure";
+        RestartSec = 10;
+        StartLimitBurst = 5;
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = lib.mkIf cfg.openFirewall [
+      cfg.settings.port
+    ];
+    networking.firewall.allowedUDPPorts = lib.mkIf cfg.openFirewall [
+      cfg.settings.port
+    ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ CobaltCause ];
+}
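
A minimal usage sketch for the new Xonotic module, using only options declared in the file above; the hostname and MOTD are placeholders:

```nix
{
  services.xonotic = {
    enable = true;
    openFirewall = true;   # opens TCP and UDP on settings.port
    settings = {
      hostname = "NixOS Xonotic $g_xonoticversion Server";
      sv_public = 1;
      sv_motd = "Welcome!";
      maxplayers = 24;
    };
  };
}
```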
diff --git a/nixos/modules/services/hardware/auto-cpufreq.nix b/nixos/modules/services/hardware/auto-cpufreq.nix
index fd2e03ef12f5..9c69ba8920f3 100644
--- a/nixos/modules/services/hardware/auto-cpufreq.nix
+++ b/nixos/modules/services/hardware/auto-cpufreq.nix
@@ -15,8 +15,7 @@ in {
         description = lib.mdDoc ''
           Configuration for `auto-cpufreq`.
 
-          See its [example configuration file] for supported settings.
-          [example configuration file]: https://github.com/AdnanHodzic/auto-cpufreq/blob/master/auto-cpufreq.conf-example
+          The available options can be found in [the example configuration file](https://github.com/AdnanHodzic/auto-cpufreq/blob/v${pkgs.auto-cpufreq.version}/auto-cpufreq.conf-example).
           '';
 
         default = {};
@@ -35,6 +34,7 @@ in {
         wantedBy = [ "multi-user.target" ];
         path = with pkgs; [ bash coreutils ];
 
+        serviceConfig.WorkingDirectory = "";
         serviceConfig.ExecStart = [
           ""
           "${lib.getExe pkgs.auto-cpufreq} --daemon --config ${cfgFile}"
@@ -42,4 +42,10 @@ in {
       };
     };
   };
+
+  # uses attributes of the linked package
+  meta = {
+    buildDocsInSandbox = false;
+    maintainers = with lib.maintainers; [ nicoo ];
+  };
 }
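
A hedged sketch of the `settings` option this hunk documents; the section and key names are assumed to mirror the upstream auto-cpufreq.conf-example and should be checked against the linked file:

```nix
{
  services.auto-cpufreq = {
    enable = true;
    settings = {
      # [charger] section of auto-cpufreq.conf (assumed key names)
      charger = {
        governor = "performance";
        turbo = "auto";
      };
      # [battery] section of auto-cpufreq.conf (assumed key names)
      battery = {
        governor = "powersave";
        turbo = "auto";
      };
    };
  };
}
```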
diff --git a/nixos/modules/services/hardware/openrgb.nix b/nixos/modules/services/hardware/openrgb.nix
index 310615ecc539..13b1d07e53b7 100644
--- a/nixos/modules/services/hardware/openrgb.nix
+++ b/nixos/modules/services/hardware/openrgb.nix
@@ -17,7 +17,14 @@ in {
 
     motherboard = mkOption {
       type = types.nullOr (types.enum [ "amd" "intel" ]);
-      default = null;
+      default = if config.hardware.cpu.intel.updateMicrocode then "intel"
+        else if config.hardware.cpu.amd.updateMicrocode then "amd"
+        else null;
+      defaultText = literalMD ''
+        if config.hardware.cpu.intel.updateMicrocode then "intel"
+        else if config.hardware.cpu.amd.updateMicrocode then "amd"
+        else null;
+      '';
       description = lib.mdDoc "CPU family of motherboard. Allows for addition motherboard i2c support.";
     };
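
A short sketch of overriding the new derived default, assuming the option path `services.hardware.openrgb`:

```nix
{
  services.hardware.openrgb = {
    enable = true;
    # Override the default derived from hardware.cpu.*.updateMicrocode when
    # no microcode option is set or the detection picks the wrong family.
    motherboard = "amd";
  };
}
```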
 
diff --git a/nixos/modules/services/home-automation/zigbee2mqtt.nix b/nixos/modules/services/home-automation/zigbee2mqtt.nix
index 796de3a491e4..6b5bd8a0d9bb 100644
--- a/nixos/modules/services/home-automation/zigbee2mqtt.nix
+++ b/nixos/modules/services/home-automation/zigbee2mqtt.nix
@@ -66,9 +66,10 @@ in
         server = mkDefault "mqtt://localhost:1883";
       };
       serial.port = mkDefault "/dev/ttyACM0";
-      # reference device configuration, that is kept in a separate file
+      # reference device/group configuration, which is kept in a separate file
       # to prevent it from being overwritten in the unit's ExecStartPre script
       devices = mkDefault "devices.yaml";
+      groups = mkDefault "groups.yaml";
     };
 
     systemd.services.zigbee2mqtt = {
diff --git a/nixos/modules/services/logging/logrotate.nix b/nixos/modules/services/logging/logrotate.nix
index 342ac5ec6e04..ba1445f08397 100644
--- a/nixos/modules/services/logging/logrotate.nix
+++ b/nixos/modules/services/logging/logrotate.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, ... }:
+{ config, lib, pkgs, utils, ... }:
 
 with lib;
 
@@ -220,6 +220,12 @@ in
           in this case you can disable the failing check with this option.
         '';
       };
+
+      extraArgs = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        default = [];
+        description = "Additional command line arguments to pass on logrotate invocation";
+      };
     };
   };
 
@@ -231,7 +237,7 @@ in
       serviceConfig = {
         Restart = "no";
         User = "root";
-        ExecStart = "${pkgs.logrotate}/sbin/logrotate ${mailOption} ${cfg.configFile}";
+        ExecStart = "${pkgs.logrotate}/sbin/logrotate ${utils.escapeSystemdExecArgs cfg.extraArgs} ${mailOption} ${cfg.configFile}";
       };
     };
     systemd.services.logrotate-checkconf = {
@@ -240,7 +246,7 @@ in
       serviceConfig = {
         Type = "oneshot";
         RemainAfterExit = true;
-        ExecStart = "${pkgs.logrotate}/sbin/logrotate --debug ${cfg.configFile}";
+        ExecStart = "${pkgs.logrotate}/sbin/logrotate ${utils.escapeSystemdExecArgs cfg.extraArgs} --debug ${cfg.configFile}";
       };
     };
   };
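
For illustration, a minimal sketch of the new `extraArgs` option; `--verbose` stands in for any real logrotate flag:

```nix
{
  services.logrotate = {
    enable = true;
    # Escaped with utils.escapeSystemdExecArgs and passed to both the main
    # logrotate unit and logrotate-checkconf.
    extraArgs = [ "--verbose" ];
  };
}
```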
diff --git a/nixos/modules/services/mail/listmonk.nix b/nixos/modules/services/mail/listmonk.nix
index 251362fdd89d..11b2a5186229 100644
--- a/nixos/modules/services/mail/listmonk.nix
+++ b/nixos/modules/services/mail/listmonk.nix
@@ -54,7 +54,7 @@ let
 
       smtp = mkOption {
         type = listOf (submodule {
-          freeformType = with types; attrsOf (oneOf [ str int bool ]);
+          freeformType = with types; attrsOf anything;
 
           options = {
             enabled = mkEnableOption (lib.mdDoc "this SMTP server for listmonk");
@@ -86,7 +86,7 @@ let
       # TODO: refine this type based on the smtp one.
       "bounce.mailboxes" = mkOption {
         type = listOf
-          (submodule { freeformType = with types; oneOf [ str int bool ]; });
+          (submodule { freeformType = with types; listOf (attrsOf anything); });
         default = [ ];
         description = lib.mdDoc "List of bounce mailboxes";
       };
diff --git a/nixos/modules/services/mail/stalwart-mail.nix b/nixos/modules/services/mail/stalwart-mail.nix
new file mode 100644
index 000000000000..eb87d9f6f695
--- /dev/null
+++ b/nixos/modules/services/mail/stalwart-mail.nix
@@ -0,0 +1,106 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.stalwart-mail;
+  configFormat = pkgs.formats.toml { };
+  configFile = configFormat.generate "stalwart-mail.toml" cfg.settings;
+  dataDir = "/var/lib/stalwart-mail";
+
+in {
+  options.services.stalwart-mail = {
+    enable = mkEnableOption (mdDoc "the Stalwart all-in-one email server");
+    package = mkPackageOptionMD pkgs "stalwart-mail" { };
+
+    settings = mkOption {
+      inherit (configFormat) type;
+      default = { };
+      description = mdDoc ''
+        Configuration options for the Stalwart email server.
+        See <https://stalw.art/docs/category/configuration> for available options.
+
+        By default, the module is configured to store everything locally.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Default config: all local
+    services.stalwart-mail.settings = {
+      global.tracing.method = mkDefault "stdout";
+      global.tracing.level = mkDefault "info";
+      queue.path = mkDefault "${dataDir}/queue";
+      report.path = mkDefault "${dataDir}/reports";
+      store.db.path = mkDefault "${dataDir}/data/index.sqlite3";
+      store.blob.type = mkDefault "local";
+      store.blob.local.path = mkDefault "${dataDir}/data/blobs";
+      resolver.type = mkDefault "system";
+    };
+
+    systemd.services.stalwart-mail = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "local-fs.target" "network.target" ];
+
+      preStart = ''
+        mkdir -p ${dataDir}/{queue,reports,data/blobs}
+      '';
+
+      serviceConfig = {
+        ExecStart =
+          "${cfg.package}/bin/stalwart-mail --config=${configFile}";
+
+        # Base from template resources/systemd/stalwart-mail.service
+        Type = "simple";
+        LimitNOFILE = 65536;
+        KillMode = "process";
+        KillSignal = "SIGINT";
+        Restart = "on-failure";
+        RestartSec = 5;
+        StandardOutput = "syslog";
+        StandardError = "syslog";
+        SyslogIdentifier = "stalwart-mail";
+
+        DynamicUser = true;
+        User = "stalwart-mail";
+        StateDirectory = "stalwart-mail";
+
+        # Bind standard privileged ports
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+
+        # Hardening
+        DeviceAllow = [ "" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = false;  # incompatible with CAP_NET_BIND_SERVICE
+        ProcSubset = "pid";
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+        UMask = "0077";
+      };
+    };
+
+    # Make admin commands available in the shell
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  meta = {
+    maintainers = with maintainers; [ happysalada pacien ];
+  };
+}
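
A minimal sketch of enabling the new Stalwart module, relying on the local-storage defaults it sets; the tracing-level override reuses a key already defaulted above, other keys follow the upstream configuration documentation:

```nix
{
  services.stalwart-mail = {
    enable = true;
    settings = {
      # Queue, report and store paths already default to subdirectories of
      # /var/lib/stalwart-mail; only deviations need to be stated here.
      global.tracing.level = "debug";
    };
  };
}
```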
diff --git a/nixos/modules/services/matrix/mautrix-telegram.nix b/nixos/modules/services/matrix/mautrix-telegram.nix
index 17032ed808e9..97a6ba858e00 100644
--- a/nixos/modules/services/matrix/mautrix-telegram.nix
+++ b/nixos/modules/services/matrix/mautrix-telegram.nix
@@ -159,7 +159,6 @@ in {
         if [ ! -f '${registrationFile}' ]; then
           ${pkgs.mautrix-telegram}/bin/mautrix-telegram \
             --generate-registration \
-            --base-config='${pkgs.mautrix-telegram}/${pkgs.mautrix-telegram.pythonModule.sitePackages}/mautrix_telegram/example-config.yaml' \
             --config='${settingsFile}' \
             --registration='${registrationFile}'
         fi
diff --git a/nixos/modules/services/matrix/mautrix-whatsapp.nix b/nixos/modules/services/matrix/mautrix-whatsapp.nix
index 80c85980196f..c4dc48213495 100644
--- a/nixos/modules/services/matrix/mautrix-whatsapp.nix
+++ b/nixos/modules/services/matrix/mautrix-whatsapp.nix
@@ -11,53 +11,47 @@
   settingsFileUnsubstituted = settingsFormat.generate "mautrix-whatsapp-config-unsubstituted.json" cfg.settings;
   settingsFormat = pkgs.formats.json {};
   appservicePort = 29318;
+
+  mkDefaults = lib.mapAttrsRecursive (n: v: lib.mkDefault v);
+  defaultConfig = {
+    homeserver.address = "http://localhost:8448";
+    appservice = {
+      hostname = "[::]";
+      port = appservicePort;
+      database.type = "sqlite3";
+      database.uri = "${dataDir}/mautrix-whatsapp.db";
+      id = "whatsapp";
+      bot.username = "whatsappbot";
+      bot.displayname = "WhatsApp Bridge Bot";
+      as_token = "";
+      hs_token = "";
+    };
+    bridge = {
+      username_template = "whatsapp_{{.}}";
+      displayname_template = "{{if .BusinessName}}{{.BusinessName}}{{else if .PushName}}{{.PushName}}{{else}}{{.JID}}{{end}} (WA)";
+      double_puppet_server_map = {};
+      login_shared_secret_map = {};
+      command_prefix = "!wa";
+      permissions."*" = "relay";
+      relay.enabled = true;
+    };
+    logging = {
+      min_level = "info";
+      writers = lib.singleton {
+        type = "stdout";
+        format = "pretty-colored";
+        time_format = " ";
+      };
+    };
+  };
+
 in {
-  imports = [];
   options.services.mautrix-whatsapp = {
-    enable = lib.mkEnableOption "mautrix-whatsapp, a puppeting/relaybot bridge between Matrix and WhatsApp.";
+    enable = lib.mkEnableOption (lib.mdDoc "mautrix-whatsapp, a puppeting/relaybot bridge between Matrix and WhatsApp.");
 
     settings = lib.mkOption {
       type = settingsFormat.type;
-      default = {
-        appservice = {
-          address = "http://localhost:${toString appservicePort}";
-          hostname = "[::]";
-          port = appservicePort;
-          database = {
-            type = "sqlite3";
-            uri = "${dataDir}/mautrix-whatsapp.db";
-          };
-          id = "whatsapp";
-          bot = {
-            username = "whatsappbot";
-            displayname = "WhatsApp Bridge Bot";
-          };
-          as_token = "";
-          hs_token = "";
-        };
-        bridge = {
-          username_template = "whatsapp_{{.}}";
-          displayname_template = "{{if .BusinessName}}{{.BusinessName}}{{else if .PushName}}{{.PushName}}{{else}}{{.JID}}{{end}} (WA)";
-          double_puppet_server_map = {};
-          login_shared_secret_map = {};
-          command_prefix = "!wa";
-          permissions."*" = "relay";
-          relay.enabled = true;
-        };
-        logging = {
-          min_level = "info";
-          writers = [
-            {
-              type = "stdout";
-              format = "pretty-colored";
-            }
-            {
-              type = "file";
-              format = "json";
-            }
-          ];
-        };
-      };
+      default = defaultConfig;
       description = lib.mdDoc ''
         {file}`config.yaml` configuration as a Nix attribute set.
         Configuration options should match those described in
@@ -117,10 +111,22 @@ in {
   };
 
   config = lib.mkIf cfg.enable {
-    services.mautrix-whatsapp.settings = {
-      homeserver.domain = lib.mkDefault config.services.matrix-synapse.settings.server_name;
+
+    users.users.mautrix-whatsapp = {
+      isSystemUser = true;
+      group = "mautrix-whatsapp";
+      home = dataDir;
+      description = "Mautrix-WhatsApp bridge user";
     };
 
+    users.groups.mautrix-whatsapp = {};
+
+    services.mautrix-whatsapp.settings = lib.mkMerge (map mkDefaults [
+      defaultConfig
+      # Note: this is defined here to avoid the docs depending on `config`
+      { homeserver.domain = config.services.matrix-synapse.settings.server_name; }
+    ]);
+
     systemd.services.mautrix-whatsapp = {
       description = "Mautrix-WhatsApp Service - A WhatsApp bridge for Matrix";
 
@@ -158,10 +164,11 @@ in {
       '';
 
       serviceConfig = {
-        DynamicUser = true;
+        User = "mautrix-whatsapp";
+        Group = "mautrix-whatsapp";
         EnvironmentFile = cfg.environmentFile;
         StateDirectory = baseNameOf dataDir;
-        WorkingDirectory = "${dataDir}";
+        WorkingDirectory = dataDir;
         ExecStart = ''
           ${pkgs.mautrix-whatsapp}/bin/mautrix-whatsapp \
           --config='${settingsFile}' \
diff --git a/nixos/modules/services/matrix/synapse-log_config.yaml b/nixos/modules/services/matrix/synapse-log_config.yaml
deleted file mode 100644
index d85bdd1208f9..000000000000
--- a/nixos/modules/services/matrix/synapse-log_config.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-version: 1
-
-# In systemd's journal, loglevel is implicitly stored, so let's omit it
-# from the message text.
-formatters:
-    journal_fmt:
-        format: '%(name)s: [%(request)s] %(message)s'
-
-filters:
-    context:
-        (): synapse.util.logcontext.LoggingContextFilter
-        request: ""
-
-handlers:
-    journal:
-        class: systemd.journal.JournalHandler
-        formatter: journal_fmt
-        filters: [context]
-        SYSLOG_IDENTIFIER: synapse
-
-root:
-    level: INFO
-    handlers: [journal]
-
-disable_existing_loggers: False
diff --git a/nixos/modules/services/matrix/synapse.nix b/nixos/modules/services/matrix/synapse.nix
index ef69a8adebb0..1354a8cb58b4 100644
--- a/nixos/modules/services/matrix/synapse.nix
+++ b/nixos/modules/services/matrix/synapse.nix
@@ -4,7 +4,7 @@ with lib;
 
 let
   cfg = config.services.matrix-synapse;
-  format = pkgs.formats.yaml {};
+  format = pkgs.formats.yaml { };
 
   # remove null values from the final configuration
   finalSettings = lib.filterAttrsRecursive (_: v: v != null) cfg.settings;
@@ -13,27 +13,28 @@ let
   usePostgresql = cfg.settings.database.name == "psycopg2";
   hasLocalPostgresDB = let args = cfg.settings.database.args; in
     usePostgresql && (!(args ? host) || (elem args.host [ "localhost" "127.0.0.1" "::1" ]));
+  hasWorkers = cfg.workers != { };
+
+  listenerSupportsResource = resource: listener:
+    lib.any ({ names, ... }: builtins.elem resource names) listener.resources;
+
+  clientListener = findFirst
+    (listenerSupportsResource "client")
+    null
+    (cfg.settings.listeners
+      ++ concatMap ({ worker_listeners, ... }: worker_listeners) (attrValues cfg.workers));
 
   registerNewMatrixUser =
     let
-      isIpv6 = x: lib.length (lib.splitString ":" x) > 1;
-      listener =
-        lib.findFirst (
-          listener: lib.any (
-            resource: lib.any (
-              name: name == "client"
-            ) resource.names
-          ) listener.resources
-        ) (lib.last cfg.settings.listeners) cfg.settings.listeners;
-        # FIXME: Handle cases with missing client listener properly,
-        # don't rely on lib.last, this will not work.
+      isIpv6 = hasInfix ":";
 
       # add a tail, so that without any bind_addresses we still have a useable address
-      bindAddress = head (listener.bind_addresses ++ [ "127.0.0.1" ]);
-      listenerProtocol = if listener.tls
+      bindAddress = head (clientListener.bind_addresses ++ [ "127.0.0.1" ]);
+      listenerProtocol = if clientListener.tls
         then "https"
         else "http";
     in
+    assert assertMsg (clientListener != null) "No client listener found in synapse or one of its workers";
     pkgs.writeShellScriptBin "matrix-synapse-register_new_matrix_user" ''
       exec ${cfg.package}/bin/register_new_matrix_user \
         $@ \
@@ -43,7 +44,7 @@ let
             "[${bindAddress}]"
           else
             "${bindAddress}"
-        }:${builtins.toString listener.port}/"
+        }:${builtins.toString clientListener.port}/"
     '';
 
   defaultExtras = [
@@ -68,6 +69,48 @@ let
     extras = wantedExtras;
     inherit (cfg) plugins;
   };
+
+  logConfig = logName: {
+    version = 1;
+    formatters.journal_fmt.format = "%(name)s: [%(request)s] %(message)s";
+    handlers.journal = {
+      class = "systemd.journal.JournalHandler";
+      formatter = "journal_fmt";
+      SYSLOG_IDENTIFIER = logName;
+    };
+    root = {
+      level = "INFO";
+      handlers = [ "journal" ];
+    };
+    disable_existing_loggers = false;
+  };
+  logConfigText = logName:
+    let
+      expr = ''
+        {
+          version = 1;
+          formatters.journal_fmt.format = "%(name)s: [%(request)s] %(message)s";
+          handlers.journal = {
+            class = "systemd.journal.JournalHandler";
+            formatter = "journal_fmt";
+            SYSLOG_IDENTIFIER = "${logName}";
+          };
+          root = {
+            level = "INFO";
+            handlers = [ "journal" ];
+          };
+          disable_existing_loggers = false;
+        };
+      '';
+    in
+    lib.literalMD ''
+      Path to a yaml file generated from this Nix expression:
+
+      ```
+      ${expr}
+      ```
+    '';
+  genLogConfigFile = logName: format.generate "synapse-log-${logName}.yaml" (logConfig logName);
 in {
 
   imports = [
@@ -154,7 +197,108 @@ in {
 
   ];
 
-  options = {
+  options = let
+    listenerType = workerContext: types.submodule {
+      options = {
+        port = mkOption {
+          type = types.port;
+          example = 8448;
+          description = lib.mdDoc ''
+            The port to listen for HTTP(S) requests on.
+          '';
+        };
+
+        bind_addresses = mkOption {
+          type = types.listOf types.str;
+          default = [
+            "::1"
+            "127.0.0.1"
+          ];
+          example = literalExpression ''
+            [
+              "::"
+              "0.0.0.0"
+            ]
+          '';
+          description = lib.mdDoc ''
+            IP addresses to bind the listener to.
+          '';
+        };
+
+        type = mkOption {
+          type = types.enum [
+            "http"
+            "manhole"
+            "metrics"
+            "replication"
+          ];
+          default = "http";
+          example = "metrics";
+          description = lib.mdDoc ''
+            The type of the listener, usually http.
+          '';
+        };
+
+        tls = mkOption {
+          type = types.bool;
+          default = !workerContext;
+          example = false;
+          description = lib.mdDoc ''
+            Whether to enable TLS on the listener socket.
+          '';
+        };
+
+        x_forwarded = mkOption {
+          type = types.bool;
+          default = false;
+          example = true;
+          description = lib.mdDoc ''
+            Use the X-Forwarded-For (XFF) header as the client IP and not the
+            actual client IP.
+          '';
+        };
+
+        resources = mkOption {
+          type = types.listOf (types.submodule {
+            options = {
+              names = mkOption {
+                type = types.listOf (types.enum [
+                  "client"
+                  "consent"
+                  "federation"
+                  "health"
+                  "keys"
+                  "media"
+                  "metrics"
+                  "openid"
+                  "replication"
+                  "static"
+                ]);
+                description = lib.mdDoc ''
+                  List of resources to host on this listener.
+                '';
+                example = [
+                  "client"
+                ];
+              };
+              compress = mkOption {
+                default = false;
+                type = types.bool;
+                description = lib.mdDoc ''
+                  Whether synapse should compress HTTP responses to clients that support it.
+                  This should be disabled if running synapse behind a load balancer
+                  that can do automatic compression.
+                '';
+              };
+            };
+          });
+          description = lib.mdDoc ''
+            List of HTTP resources to serve on this listener.
+          '';
+        };
+      };
+    };
+  in {
     services.matrix-synapse = {
       enable = mkEnableOption (lib.mdDoc "matrix.org synapse");
 
@@ -251,7 +395,7 @@ in {
       };
 
       settings = mkOption {
-        default = {};
+        default = { };
         description = mdDoc ''
           The primary synapse configuration. See the
           [sample configuration](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_config.yaml)
@@ -346,8 +490,8 @@ in {
 
             log_config = mkOption {
               type = types.path;
-              default = ./synapse-log_config.yaml;
-              defaultText = lib.literalExpression "nixos/modules/services/matrix/synapse-log_config.yaml";
+              default = genLogConfigFile "synapse";
+              defaultText = logConfigText "synapse";
               description = lib.mdDoc ''
                 The file that holds the logging configuration.
               '';
@@ -409,120 +553,37 @@ in {
             };
 
             listeners = mkOption {
-              type = types.listOf (types.submodule {
-                options = {
-                  port = mkOption {
-                    type = types.port;
-                    example = 8448;
-                    description = lib.mdDoc ''
-                      The port to listen for HTTP(S) requests on.
-                    '';
-                  };
-
-                  bind_addresses = mkOption {
-                    type = types.listOf types.str;
-                    default = [
-                      "::1"
-                      "127.0.0.1"
-                    ];
-                    example = literalExpression ''
-                    [
-                      "::"
-                      "0.0.0.0"
-                    ]
-                    '';
-                    description = lib.mdDoc ''
-                     IP addresses to bind the listener to.
-                    '';
-                  };
-
-                  type = mkOption {
-                    type = types.enum [
-                      "http"
-                      "manhole"
-                      "metrics"
-                      "replication"
-                    ];
-                    default = "http";
-                    example = "metrics";
-                    description = lib.mdDoc ''
-                      The type of the listener, usually http.
-                    '';
-                  };
-
-                  tls = mkOption {
-                    type = types.bool;
-                    default = true;
-                    example = false;
-                    description = lib.mdDoc ''
-                      Whether to enable TLS on the listener socket.
-                    '';
-                  };
-
-                  x_forwarded = mkOption {
-                    type = types.bool;
-                    default = false;
-                    example = true;
-                    description = lib.mdDoc ''
-                      Use the X-Forwarded-For (XFF) header as the client IP and not the
-                      actual client IP.
-                    '';
-                  };
-
-                  resources = mkOption {
-                    type = types.listOf (types.submodule {
-                      options = {
-                        names = mkOption {
-                          type = types.listOf (types.enum [
-                            "client"
-                            "consent"
-                            "federation"
-                            "keys"
-                            "media"
-                            "metrics"
-                            "openid"
-                            "replication"
-                            "static"
-                          ]);
-                          description = lib.mdDoc ''
-                            List of resources to host on this listener.
-                          '';
-                          example = [
-                            "client"
-                          ];
-                        };
-                        compress = mkOption {
-                          type = types.bool;
-                          description = lib.mdDoc ''
-                            Should synapse compress HTTP responses to clients that support it?
-                            This should be disabled if running synapse behind a load balancer
-                            that can do automatic compression.
-                          '';
-                        };
-                      };
-                    });
-                    description = lib.mdDoc ''
-                      List of HTTP resources to serve on this listener.
-                    '';
-                  };
-                };
-              });
-              default = [ {
+              type = types.listOf (listenerType false);
+              default = [{
                 port = 8008;
                 bind_addresses = [ "127.0.0.1" ];
                 type = "http";
                 tls = false;
                 x_forwarded = true;
-                resources = [ {
+                resources = [{
                   names = [ "client" ];
                   compress = true;
                 } {
                   names = [ "federation" ];
                   compress = false;
-                } ];
-              } ];
+                }];
+              }] ++ lib.optional hasWorkers {
+                port = 9093;
+                bind_addresses = [ "127.0.0.1" ];
+                type = "http";
+                tls = false;
+                x_forwarded = false;
+                resources = [{
+                  names = [ "replication" ];
+                  compress = false;
+                }];
+              };
               description = lib.mdDoc ''
                 List of ports that Synapse should listen on, their purpose and their configuration.
+
+                By default, synapse will be configured for client and federation traffic on port 8008, and
+                for worker replication traffic on port 9093. See [`services.matrix-synapse.workers`](#opt-services.matrix-synapse.workers)
+                for more details.
               '';
             };
 
@@ -534,7 +595,7 @@ in {
               default = if versionAtLeast config.system.stateVersion "18.03"
                 then "psycopg2"
                 else "sqlite3";
-               defaultText = literalExpression ''
+              defaultText = literalExpression ''
                 if versionAtLeast config.system.stateVersion "18.03"
                 then "psycopg2"
                 else "sqlite3"
@@ -551,10 +612,10 @@ in {
                 psycopg2 = "matrix-synapse";
               }.${cfg.settings.database.name};
               defaultText = literalExpression ''
-              {
-                sqlite3 = "''${${options.services.matrix-synapse.dataDir}}/homeserver.db";
-                psycopg2 = "matrix-synapse";
-              }.''${${options.services.matrix-synapse.settings}.database.name};
+                {
+                  sqlite3 = "''${${options.services.matrix-synapse.dataDir}}/homeserver.db";
+                  psycopg2 = "matrix-synapse";
+                }.''${${options.services.matrix-synapse.settings}.database.name};
               '';
               description = lib.mdDoc ''
                 Name of the database when using the psycopg2 backend,
@@ -622,7 +683,7 @@ in {
 
             url_preview_ip_range_whitelist = mkOption {
               type = types.listOf types.str;
-              default = [];
+              default = [ ];
               description = lib.mdDoc ''
                 List of IP address CIDR ranges that the URL preview spider is allowed
                 to access even if they are specified in url_preview_ip_range_blacklist.
@@ -630,8 +691,27 @@ in {
             };
 
             url_preview_url_blacklist = mkOption {
-              type = types.listOf types.str;
-              default = [];
+              # FIXME revert to just `listOf (attrsOf str)` after some time(tm).
+              type = types.listOf (
+                types.coercedTo
+                  types.str
+                  (const (throw ''
+                    Setting `config.services.matrix-synapse.settings.url_preview_url_blacklist`
+                    to a list of strings has never worked. Due to a bug, this was the type accepted
+                    by the module, but in practice it broke on runtime and as a result, no URL
+                    preview worked anywhere if this was set.
+
+                    See https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#url_preview_url_blacklist
+                    on how to configure it properly.
+                  ''))
+                  (types.attrsOf types.str));
+              default = [ ];
+              example = literalExpression ''
+                [
+                  { scheme = "http"; } # no http previews
+                  { netloc = "www.acme.com"; path = "/foo"; } # block http(s)://www.acme.com/foo
+                ]
+              '';
               description = lib.mdDoc ''
                 Optional list of URL matches that the URL preview spider is
                 denied from accessing.
@@ -671,7 +751,7 @@ in {
 
             turn_uris = mkOption {
               type = types.listOf types.str;
-              default = [];
+              default = [ ];
               example = [
                 "turn:turn.example.com:3487?transport=udp"
                 "turn:turn.example.com:3487?transport=tcp"
@@ -708,12 +788,12 @@ in {
                   };
                 };
               });
-              default = [ {
+              default = [{
                 server_name = "matrix.org";
                 verify_keys = {
                   "ed25519:auto" = "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw";
                 };
-              } ];
+              }];
               description = lib.mdDoc ''
                 The trusted servers to download signing keys from.
               '';
@@ -727,13 +807,114 @@ in {
               '';
             };
 
+            redis = lib.mkOption {
+              type = types.submodule {
+                freeformType = format.type;
+                options = {
+                  enabled = lib.mkOption {
+                    type = types.bool;
+                    default = false;
+                    description = lib.mdDoc ''
+                      Whether to enable redis support.
+                    '';
+                  };
+                };
+              };
+              default = { };
+              description = lib.mdDoc ''
+                Redis configuration for synapse.
+
+                See the
+                [upstream documentation](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/usage/configuration/config_documentation.md#redis)
+                for available options.
+              '';
+            };
           };
         };
       };
 
+      workers = lib.mkOption {
+        default = { };
+        description = lib.mdDoc ''
+          Options for configuring workers. Worker support will be enabled if at least one worker is configured here.
+
+          See the [worker documentation](https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration)
+          for possible options for each worker. Worker-specific options overriding the shared homeserver configuration can be
+          specified here for each worker.
+
+          ::: {.note}
+            Worker support will add a replication listener on port 9093 to the main synapse process using the default
+            value of [`services.matrix-synapse.settings.listeners`](#opt-services.matrix-synapse.settings.listeners) and configure that
+            listener as `services.matrix-synapse.settings.instance_map.main`.
+            If you set either of those options, make sure to configure a replication listener yourself.
+
+            A redis server is required for running workers. A local one can be enabled
+            using [`services.matrix-synapse.configureRedisLocally`](#opt-services.matrix-synapse.configureRedisLocally).
+
+            Workers also require a proper reverse proxy setup to direct incoming requests to the appropriate process. See
+            the [reverse proxy documentation](https://matrix-org.github.io/synapse/latest/reverse_proxy.html) for a
+            general reverse proxying setup and
+            the [worker documentation](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications)
+            for the available endpoints per worker application.
+          :::
+        '';
+        type = types.attrsOf (types.submodule ({name, ...}: {
+          freeformType = format.type;
+          options = {
+            worker_app = lib.mkOption {
+              type = types.enum [
+                "synapse.app.generic_worker"
+                "synapse.app.media_repository"
+              ];
+              description = "Type of this worker";
+              default = "synapse.app.generic_worker";
+            };
+            worker_listeners = lib.mkOption {
+              default = [ ];
+              type = types.listOf (listenerType true);
+              description = lib.mdDoc ''
+                List of ports that this worker should listen on, their purpose and their configuration.
+              '';
+            };
+            worker_log_config = lib.mkOption {
+              type = types.path;
+              default = genLogConfigFile "synapse-${name}";
+              defaultText = logConfigText "synapse-${name}";
+              description = lib.mdDoc ''
+                The file for log configuration.
+
+                See the [python documentation](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema)
+                for the schema and the [upstream repository](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_log_config.yaml)
+                for an example.
+              '';
+            };
+          };
+        }));
+        default = { };
+        example = lib.literalExpression ''
+          {
+            "federation_sender" = { };
+            "federation_receiver" = {
+              worker_listeners = [
+                {
+                  type = "http";
+                  port = 8009;
+                  bind_addresses = [ "127.0.0.1" ];
+                  tls = false;
+                  x_forwarded = true;
+                  resources = [{
+                    names = [ "federation" ];
+                  }];
+                }
+              ];
+            };
+          }
+        '';
+      };
+
       extraConfigFiles = mkOption {
         type = types.listOf types.path;
-        default = [];
+        default = [ ];
         description = lib.mdDoc ''
           Extra config files to include.
 
@@ -743,12 +924,28 @@ in {
           NixOps is in use.
         '';
       };
+
+      configureRedisLocally = lib.mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to automatically configure a local redis server for matrix-synapse.
+        '';
+      };
     };
   };
 
   config = mkIf cfg.enable {
     assertions = [
-      { assertion = hasLocalPostgresDB -> config.services.postgresql.enable;
+      {
+        assertion = clientListener != null;
+        message = ''
+          At least one listener which serves the `client` resource via HTTP is required
+          by synapse in `services.matrix-synapse.settings.listeners` or in one of the workers!
+        '';
+      }
+      {
+        assertion = hasLocalPostgresDB -> config.services.postgresql.enable;
         message = ''
           Cannot deploy matrix-synapse with a configuration for a local postgresql database
             and a missing postgresql service. Since 20.03 it's mandatory to manually configure the
@@ -764,8 +961,47 @@ in {
           For further information about this update, please read the release-notes of 20.03 carefully.
         '';
       }
+      {
+        assertion = hasWorkers -> cfg.settings.redis.enabled;
+        message = ''
+          Workers for matrix-synapse require configuring a redis instance. This can be done
+          automatically by setting `services.matrix-synapse.configureRedisLocally = true`.
+        '';
+      }
+      {
+        assertion =
+          let
+            main = cfg.settings.instance_map.main;
+            listener = lib.findFirst
+              (
+                listener:
+                  listener.port == main.port
+                  && listenerSupportsResource "replication" listener
+                  && (lib.any (bind: bind == main.host || bind == "0.0.0.0" || bind == "::") listener.bind_addresses)
+              )
+              null
+              cfg.settings.listeners;
+          in
+          hasWorkers -> (cfg.settings.instance_map ? main && listener != null);
+        message = ''
+          Workers for matrix-synapse require setting `services.matrix-synapse.settings.instance_map.main`
+          to any listener configured in `services.matrix-synapse.settings.listeners` with a `"replication"`
+          resource.
+
+          This is done by default unless you manually configure either of those settings.
+        '';
+      }
     ];
 
+    services.matrix-synapse.settings.redis = lib.mkIf cfg.configureRedisLocally {
+      enabled = true;
+      path = config.services.redis.servers.matrix-synapse.unixSocket;
+    };
+    services.matrix-synapse.settings.instance_map.main = lib.mkIf hasWorkers (lib.mkDefault {
+      host = "127.0.0.1";
+      port = 9093;
+    });
+
     services.matrix-synapse.configFile = configFile;
     services.matrix-synapse.package = wrapped;
 
@@ -784,64 +1020,124 @@ in {
       gid = config.ids.gids.matrix-synapse;
     };
 
-    systemd.services.matrix-synapse = {
-      description = "Synapse Matrix homeserver";
-      after = [ "network.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
+    systemd.targets.matrix-synapse = lib.mkIf hasWorkers {
+      description = "Synapse Matrix parent target";
+      after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
       wantedBy = [ "multi-user.target" ];
-      preStart = ''
-        ${cfg.package}/bin/synapse_homeserver \
-          --config-path ${configFile} \
-          --keys-directory ${cfg.dataDir} \
-          --generate-keys
-      '';
-      environment = optionalAttrs (cfg.withJemalloc) {
-        LD_PRELOAD = "${pkgs.jemalloc}/lib/libjemalloc.so";
-      };
-      serviceConfig = {
-        Type = "notify";
-        User = "matrix-synapse";
-        Group = "matrix-synapse";
-        WorkingDirectory = cfg.dataDir;
-        ExecStartPre = [ ("+" + (pkgs.writeShellScript "matrix-synapse-fix-permissions" ''
-          chown matrix-synapse:matrix-synapse ${cfg.settings.signing_key_path}
-          chmod 0600 ${cfg.settings.signing_key_path}
-        '')) ];
-        ExecStart = ''
-          ${cfg.package}/bin/synapse_homeserver \
-            ${ concatMapStringsSep "\n  " (x: "--config-path ${x} \\") ([ configFile ] ++ cfg.extraConfigFiles) }
-            --keys-directory ${cfg.dataDir}
-        '';
-        ExecReload = "${pkgs.util-linux}/bin/kill -HUP $MAINPID";
-        Restart = "on-failure";
-        UMask = "0077";
-
-        # Security Hardening
-        # Refer to systemd.exec(5) for option descriptions.
-        CapabilityBoundingSet = [ "" ];
-        LockPersonality = true;
-        NoNewPrivileges = true;
-        PrivateDevices = true;
-        PrivateTmp = true;
-        PrivateUsers = true;
-        ProcSubset = "pid";
-        ProtectClock = true;
-        ProtectControlGroups = true;
-        ProtectHome = true;
-        ProtectHostname = true;
-        ProtectKernelLogs = true;
-        ProtectKernelModules = true;
-        ProtectKernelTunables = true;
-        ProtectProc = "invisible";
-        ProtectSystem = "strict";
-        ReadWritePaths = [ cfg.dataDir ];
-        RemoveIPC = true;
-        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
-        RestrictNamespaces = true;
-        RestrictRealtime = true;
-        RestrictSUIDSGID = true;
-        SystemCallArchitectures = "native";
-        SystemCallFilter = [ "@system-service" "~@resources" "~@privileged" ];
-      };
+    };
+
+    systemd.services =
+      let
+        targetConfig =
+          if hasWorkers
+          then {
+            partOf = [ "matrix-synapse.target" ];
+            wantedBy = [ "matrix-synapse.target" ];
+            unitConfig.ReloadPropagatedFrom = "matrix-synapse.target";
+          }
+          else {
+            after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
+            wantedBy = [ "multi-user.target" ];
+          };
+        baseServiceConfig = {
+          environment = optionalAttrs (cfg.withJemalloc) {
+            LD_PRELOAD = "${pkgs.jemalloc}/lib/libjemalloc.so";
+          };
+          serviceConfig = {
+            Type = "notify";
+            User = "matrix-synapse";
+            Group = "matrix-synapse";
+            WorkingDirectory = cfg.dataDir;
+            ExecReload = "${pkgs.util-linux}/bin/kill -HUP $MAINPID";
+            Restart = "on-failure";
+            UMask = "0077";
+
+            # Security Hardening
+            # Refer to systemd.exec(5) for option descriptions.
+            CapabilityBoundingSet = [ "" ];
+            LockPersonality = true;
+            NoNewPrivileges = true;
+            PrivateDevices = true;
+            PrivateTmp = true;
+            PrivateUsers = true;
+            ProcSubset = "pid";
+            ProtectClock = true;
+            ProtectControlGroups = true;
+            ProtectHome = true;
+            ProtectHostname = true;
+            ProtectKernelLogs = true;
+            ProtectKernelModules = true;
+            ProtectKernelTunables = true;
+            ProtectProc = "invisible";
+            ProtectSystem = "strict";
+            ReadWritePaths = [ cfg.dataDir ];
+            RemoveIPC = true;
+            RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+            RestrictNamespaces = true;
+            RestrictRealtime = true;
+            RestrictSUIDSGID = true;
+            SystemCallArchitectures = "native";
+            SystemCallFilter = [ "@system-service" "~@resources" "~@privileged" ];
+          };
+        }
+        // targetConfig;
+        genWorkerService = name: workerCfg:
+          let
+            finalWorkerCfg = workerCfg // { worker_name = name; };
+            workerConfigFile = format.generate "worker-${name}.yaml" finalWorkerCfg;
+          in
+          {
+            name = "matrix-synapse-worker-${name}";
+            value = lib.mkMerge [
+              baseServiceConfig
+              {
+                description = "Synapse Matrix worker ${name}";
+                # make sure the main process starts first for potential database migrations
+                after = [ "matrix-synapse.service" ];
+                requires = [ "matrix-synapse.service" ];
+                serviceConfig = {
+                  ExecStart = ''
+                    ${cfg.package}/bin/synapse_worker \
+                      ${ concatMapStringsSep "\n  " (x: "--config-path ${x} \\") ([ configFile workerConfigFile ] ++ cfg.extraConfigFiles) }
+                      --keys-directory ${cfg.dataDir}
+                  '';
+                };
+              }
+            ];
+          };
+      in
+      {
+        matrix-synapse = lib.mkMerge [
+          baseServiceConfig
+          {
+            description = "Synapse Matrix homeserver";
+            preStart = ''
+              ${cfg.package}/bin/synapse_homeserver \
+                --config-path ${configFile} \
+                --keys-directory ${cfg.dataDir} \
+                --generate-keys
+            '';
+            serviceConfig = {
+              ExecStartPre = [
+                ("+" + (pkgs.writeShellScript "matrix-synapse-fix-permissions" ''
+                  chown matrix-synapse:matrix-synapse ${cfg.settings.signing_key_path}
+                  chmod 0600 ${cfg.settings.signing_key_path}
+                ''))
+              ];
+              ExecStart = ''
+                ${cfg.package}/bin/synapse_homeserver \
+                  ${ concatMapStringsSep "\n  " (x: "--config-path ${x} \\") ([ configFile ] ++ cfg.extraConfigFiles) }
+                  --keys-directory ${cfg.dataDir}
+              '';
+            };
+          }
+        ];
+      }
+      // (lib.mapAttrs' genWorkerService cfg.workers);
+
+    services.redis.servers.matrix-synapse = lib.mkIf cfg.configureRedisLocally {
+      enable = true;
+      user = "matrix-synapse";
     };
 
     environment.systemPackages = [ registerNewMatrixUser ];
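A minimal usage sketch of the new worker support (the worker name "federation-sender1" is hypothetical): setting `configureRedisLocally` provisions the Redis instance the first assertion requires, and `instance_map.main` plus a matching replication listener are filled in by default:

    services.matrix-synapse = {
      enable = true;
      configureRedisLocally = true;
      workers."federation-sender1" = {
        worker_app = "synapse.app.generic_worker";
      };
    };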
diff --git a/nixos/modules/services/misc/atuin.nix b/nixos/modules/services/misc/atuin.nix
index 57ff02df7d68..8d2c1b5242ff 100644
--- a/nixos/modules/services/misc/atuin.nix
+++ b/nixos/modules/services/misc/atuin.nix
@@ -6,7 +6,7 @@ in
 {
   options = {
     services.atuin = {
-      enable = lib.mkEnableOption (mdDoc "Enable server for shell history sync with atuin");
+      enable = lib.mkEnableOption (mdDoc "Atuin server for shell history sync");
 
       openRegistration = mkOption {
         type = types.bool;
diff --git a/nixos/modules/services/misc/cfdyndns.nix b/nixos/modules/services/misc/cfdyndns.nix
index 9cd8b188ffae..dba8ac200151 100644
--- a/nixos/modules/services/misc/cfdyndns.nix
+++ b/nixos/modules/services/misc/cfdyndns.nix
@@ -23,6 +23,15 @@ in
         '';
       };
 
+      apiTokenFile = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          The path to a file containing the API Token
+          used to authenticate with CloudFlare.
+        '';
+      };
+
       apikeyFile = mkOption {
         default = null;
         type = types.nullOr types.str;
@@ -51,32 +60,22 @@ in
       startAt = "*:0/5";
       serviceConfig = {
         Type = "simple";
-        User = config.ids.uids.cfdyndns;
-        Group = config.ids.gids.cfdyndns;
+        LoadCredential = lib.optional (cfg.apiTokenFile != null) "CLOUDFLARE_APITOKEN_FILE:${cfg.apiTokenFile}";
+        DynamicUser = true;
       };
       environment = {
-        CLOUDFLARE_EMAIL="${cfg.email}";
         CLOUDFLARE_RECORDS="${concatStringsSep "," cfg.records}";
       };
       script = ''
         ${optionalString (cfg.apikeyFile != null) ''
           export CLOUDFLARE_APIKEY="$(cat ${escapeShellArg cfg.apikeyFile})"
+          export CLOUDFLARE_EMAIL="${cfg.email}"
+        ''}
+        ${optionalString (cfg.apiTokenFile != null) ''
+          export CLOUDFLARE_APITOKEN=$(${pkgs.systemd}/bin/systemd-creds cat CLOUDFLARE_APITOKEN_FILE)
         ''}
         ${pkgs.cfdyndns}/bin/cfdyndns
       '';
     };
-
-    users.users = {
-      cfdyndns = {
-        group = "cfdyndns";
-        uid = config.ids.uids.cfdyndns;
-      };
-    };
-
-    users.groups = {
-      cfdyndns = {
-        gid = config.ids.gids.cfdyndns;
-      };
-    };
   };
 }
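A minimal usage sketch of the new token-based authentication (the secret path and record are hypothetical): the token file is handed to the DynamicUser service via systemd's LoadCredential instead of living world-readable in the environment:

    services.cfdyndns = {
      enable = true;
      records = [ "home.example.com" ];
      apiTokenFile = "/run/secrets/cloudflare-api-token";
    };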
diff --git a/nixos/modules/services/misc/forgejo.nix b/nixos/modules/services/misc/forgejo.nix
new file mode 100644
index 000000000000..f26658b7bcb4
--- /dev/null
+++ b/nixos/modules/services/misc/forgejo.nix
@@ -0,0 +1,668 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+  cfg = config.services.forgejo;
+  opt = options.services.forgejo;
+  format = pkgs.formats.ini { };
+
+  exe = lib.getExe cfg.package;
+
+  pg = config.services.postgresql;
+  useMysql = cfg.database.type == "mysql";
+  usePostgresql = cfg.database.type == "postgres";
+  useSqlite = cfg.database.type == "sqlite3";
+
+  inherit (lib)
+    literalExpression
+    mdDoc
+    mkChangedOptionModule
+    mkDefault
+    mkEnableOption
+    mkIf
+    mkMerge
+    mkOption
+    mkPackageOptionMD
+    mkRemovedOptionModule
+    mkRenamedOptionModule
+    optionalAttrs
+    optionals
+    optionalString
+    types
+    ;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "forgejo" "appName" ] [ "services" "forgejo" "settings" "DEFAULT" "APP_NAME" ])
+    (mkRemovedOptionModule [ "services" "forgejo" "extraConfig" ] "services.forgejo.extraConfig has been removed. Please use the freeform services.forgejo.settings option instead")
+    (mkRemovedOptionModule [ "services" "forgejo" "database" "password" ] "services.forgejo.database.password has been removed. Please use services.forgejo.database.passwordFile instead")
+
+    # copied from services.gitea; remove at some point
+    (mkRenamedOptionModule [ "services" "forgejo" "cookieSecure" ] [ "services" "forgejo" "settings" "session" "COOKIE_SECURE" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "disableRegistration" ] [ "services" "forgejo" "settings" "service" "DISABLE_REGISTRATION" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "domain" ] [ "services" "forgejo" "settings" "server" "DOMAIN" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "httpAddress" ] [ "services" "forgejo" "settings" "server" "HTTP_ADDR" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "httpPort" ] [ "services" "forgejo" "settings" "server" "HTTP_PORT" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "log" "level" ] [ "services" "forgejo" "settings" "log" "LEVEL" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "log" "rootPath" ] [ "services" "forgejo" "settings" "log" "ROOT_PATH" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "rootUrl" ] [ "services" "forgejo" "settings" "server" "ROOT_URL" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "ssh" "clonePort" ] [ "services" "forgejo" "settings" "server" "SSH_PORT" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "staticRootPath" ] [ "services" "forgejo" "settings" "server" "STATIC_ROOT_PATH" ])
+    (mkChangedOptionModule [ "services" "forgejo" "enableUnixSocket" ] [ "services" "forgejo" "settings" "server" "PROTOCOL" ] (
+      config: if config.services.forgejo.enableUnixSocket then "http+unix" else "http"
+    ))
+    (mkRemovedOptionModule [ "services" "forgejo" "ssh" "enable" ] "services.forgejo.ssh.enable has been migrated into freeform setting services.forgejo.settings.server.DISABLE_SSH. Keep in mind that the setting is inverted")
+  ];
+
+  options = {
+    services.forgejo = {
+      enable = mkEnableOption (mdDoc "Forgejo");
+
+      package = mkPackageOptionMD pkgs "forgejo" { };
+
+      useWizard = mkOption {
+        default = false;
+        type = types.bool;
+        description = mdDoc ''
+          Whether to use the built-in installation wizard instead of
+          declaratively managing the {file}`app.ini` config file in nix.
+        '';
+      };
+
+      stateDir = mkOption {
+        default = "/var/lib/forgejo";
+        type = types.str;
+        description = mdDoc "Forgejo data directory.";
+      };
+
+      customDir = mkOption {
+        default = "${cfg.stateDir}/custom";
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/custom"'';
+        type = types.str;
+        description = mdDoc ''
+          Base directory for custom templates and other options.
+
+          If {option}`${opt.useWizard}` is disabled (default), this directory will also
+          hold secrets and the resulting {file}`app.ini` config at runtime.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "forgejo";
+        description = mdDoc "User account under which Forgejo runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "forgejo";
+        description = mdDoc "Group under which Forgejo runs.";
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "sqlite3" "mysql" "postgres" ];
+          example = "mysql";
+          default = "sqlite3";
+          description = mdDoc "Database engine to use.";
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = mdDoc "Database host address.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = if !usePostgresql then 3306 else pg.port;
+          defaultText = literalExpression ''
+            if config.${opt.database.type} != "postgresql"
+            then 3306
+            else config.${options.services.postgresql.port}
+          '';
+          description = mdDoc "Database host port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "forgejo";
+          description = mdDoc "Database name.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "forgejo";
+          description = mdDoc "Database user.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/forgejo-dbpassword";
+          description = mdDoc ''
+            A file containing the password corresponding to
+            {option}`${opt.database.user}`.
+          '';
+        };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = if (cfg.database.createDatabase && usePostgresql) then "/run/postgresql" else if (cfg.database.createDatabase && useMysql) then "/run/mysqld/mysqld.sock" else null;
+          defaultText = literalExpression "null";
+          example = "/run/mysqld/mysqld.sock";
+          description = mdDoc "Path to the unix socket file to use for authentication.";
+        };
+
+        path = mkOption {
+          type = types.str;
+          default = "${cfg.stateDir}/data/forgejo.db";
+          defaultText = literalExpression ''"''${config.${opt.stateDir}}/data/forgejo.db"'';
+          description = mdDoc "Path to the sqlite3 database file.";
+        };
+
+        createDatabase = mkOption {
+          type = types.bool;
+          default = true;
+          description = mdDoc "Whether to create a local database automatically.";
+        };
+      };
+
+      dump = {
+        enable = mkEnableOption (mdDoc "periodic dumps via the [built-in {command}`dump` command](https://forgejo.org/docs/latest/admin/command-line/#dump)");
+
+        interval = mkOption {
+          type = types.str;
+          default = "04:31";
+          example = "hourly";
+          description = mdDoc ''
+            Run a Forgejo dump at this interval. Runs by default at 04:31 every day.
+
+            The format is described in
+            {manpage}`systemd.time(7)`.
+          '';
+        };
+
+        backupDir = mkOption {
+          type = types.str;
+          default = "${cfg.stateDir}/dump";
+          defaultText = literalExpression ''"''${config.${opt.stateDir}}/dump"'';
+          description = mdDoc "Path to the directory where the dump archives will be stored.";
+        };
+
+        type = mkOption {
+          type = types.enum [ "zip" "tar" "tar.sz" "tar.gz" "tar.xz" "tar.bz2" "tar.br" "tar.lz4" "tar.zst" ];
+          default = "zip";
+          description = mdDoc "Archive format used to store the dump file.";
+        };
+
+        file = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = mdDoc "Filename to be used for the dump. If `null` a default name is chosen by forgejo.";
+          example = "forgejo-dump";
+        };
+      };
+
+      lfs = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = mdDoc "Enables git-lfs support.";
+        };
+
+        contentDir = mkOption {
+          type = types.str;
+          default = "${cfg.stateDir}/data/lfs";
+          defaultText = literalExpression ''"''${config.${opt.stateDir}}/data/lfs"'';
+          description = mdDoc "Where to store LFS files.";
+        };
+      };
+
+      repositoryRoot = mkOption {
+        type = types.str;
+        default = "${cfg.stateDir}/repositories";
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/repositories"'';
+        description = mdDoc "Path to the git repositories.";
+      };
+
+      mailerPasswordFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/run/keys/forgejo-mailpw";
+        description = mdDoc "Path to a file containing the SMTP password.";
+      };
+
+      settings = mkOption {
+        default = { };
+        description = mdDoc ''
+          Free-form settings written directly to the `app.ini` config file.
+          Refer to <https://forgejo.org/docs/latest/admin/config-cheat-sheet/> for supported values.
+        '';
+        example = literalExpression ''
+          {
+            DEFAULT = {
+              RUN_MODE = "dev";
+            };
+            "cron.sync_external_users" = {
+              RUN_AT_START = true;
+              SCHEDULE = "@every 24h";
+              UPDATE_EXISTING = true;
+            };
+            mailer = {
+              ENABLED = true;
+              MAILER_TYPE = "sendmail";
+              FROM = "do-not-reply@example.org";
+              SENDMAIL_PATH = "''${pkgs.system-sendmail}/bin/sendmail";
+            };
+            other = {
+              SHOW_FOOTER_VERSION = false;
+            };
+          }
+        '';
+        type = types.submodule {
+          freeformType = format.type;
+          options = {
+            log = {
+              ROOT_PATH = mkOption {
+                default = "${cfg.stateDir}/log";
+                defaultText = literalExpression ''"''${config.${opt.stateDir}}/log"'';
+                type = types.str;
+                description = mdDoc "Root path for log files.";
+              };
+              LEVEL = mkOption {
+                default = "Info";
+                type = types.enum [ "Trace" "Debug" "Info" "Warn" "Error" "Critical" ];
+                description = mdDoc "General log level.";
+              };
+            };
+
+            server = {
+              PROTOCOL = mkOption {
+                type = types.enum [ "http" "https" "fcgi" "http+unix" "fcgi+unix" ];
+                default = "http";
+                description = mdDoc ''Listen protocol. `+unix` means "over unix", not "in addition to."'';
+              };
+
+              HTTP_ADDR = mkOption {
+                type = types.either types.str types.path;
+                default = if lib.hasSuffix "+unix" cfg.settings.server.PROTOCOL then "/run/forgejo/forgejo.sock" else "0.0.0.0";
+                defaultText = literalExpression ''if lib.hasSuffix "+unix" cfg.settings.server.PROTOCOL then "/run/forgejo/forgejo.sock" else "0.0.0.0"'';
+                description = mdDoc "Listen address. Must be a path when using a unix socket.";
+              };
+
+              HTTP_PORT = mkOption {
+                type = types.port;
+                default = 3000;
+                description = mdDoc "Listen port. Ignored when using a unix socket.";
+              };
+
+              DOMAIN = mkOption {
+                type = types.str;
+                default = "localhost";
+                description = mdDoc "Domain name of your server.";
+              };
+
+              ROOT_URL = mkOption {
+                type = types.str;
+                default = "http://${cfg.settings.server.DOMAIN}:${toString cfg.settings.server.HTTP_PORT}/";
+                defaultText = literalExpression ''"http://''${config.services.forgejo.settings.server.DOMAIN}:''${toString config.services.forgejo.settings.server.HTTP_PORT}/"'';
+                description = mdDoc "Full public URL of Forgejo server.";
+              };
+
+              STATIC_ROOT_PATH = mkOption {
+                type = types.either types.str types.path;
+                default = cfg.package.data;
+                defaultText = literalExpression "config.${opt.package}.data";
+                example = "/var/lib/forgejo/data";
+                description = mdDoc "Upper level of template and static files path.";
+              };
+
+              DISABLE_SSH = mkOption {
+                type = types.bool;
+                default = false;
+                description = mdDoc "Disable external SSH feature.";
+              };
+
+              SSH_PORT = mkOption {
+                type = types.port;
+                default = 22;
+                example = 2222;
+                description = mdDoc ''
+                  SSH port displayed in clone URL.
+                  This option is required when the externally visible port
+                  differs from the local listening port, e.g. when port forwarding is used.
+                '';
+              };
+            };
+
+            session = {
+              COOKIE_SECURE = mkOption {
+                type = types.bool;
+                default = false;
+                description = mdDoc ''
+                  Marks session cookies as "secure" as a hint for browsers to only send
+                  them via HTTPS. This option is recommended if Forgejo is being served over HTTPS.
+                '';
+              };
+            };
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.database.createDatabase -> useSqlite || cfg.database.user == cfg.user;
+        message = "services.forgejo.database.user must match services.forgejo.user if the database is to be automatically provisioned";
+      }
+    ];
+
+    services.forgejo.settings = {
+      DEFAULT = {
+        RUN_MODE = mkDefault "prod";
+        RUN_USER = mkDefault cfg.user;
+        WORK_PATH = mkDefault cfg.stateDir;
+      };
+
+      database = mkMerge [
+        {
+          DB_TYPE = cfg.database.type;
+        }
+        (mkIf (useMysql || usePostgresql) {
+          HOST = if cfg.database.socket != null then cfg.database.socket else cfg.database.host + ":" + toString cfg.database.port;
+          NAME = cfg.database.name;
+          USER = cfg.database.user;
+          PASSWD = "#dbpass#";
+        })
+        (mkIf useSqlite {
+          PATH = cfg.database.path;
+        })
+        (mkIf usePostgresql {
+          SSL_MODE = "disable";
+        })
+      ];
+
+      repository = {
+        ROOT = cfg.repositoryRoot;
+      };
+
+      server = mkIf cfg.lfs.enable {
+        LFS_START_SERVER = true;
+        LFS_JWT_SECRET = "#lfsjwtsecret#";
+      };
+
+      session = {
+        COOKIE_NAME = mkDefault "session";
+      };
+
+      security = {
+        SECRET_KEY = "#secretkey#";
+        INTERNAL_TOKEN = "#internaltoken#";
+        INSTALL_LOCK = true;
+      };
+
+      mailer = mkIf (cfg.mailerPasswordFile != null) {
+        PASSWD = "#mailerpass#";
+      };
+
+      oauth2 = {
+        JWT_SECRET = "#oauth2jwtsecret#";
+      };
+
+      lfs = mkIf cfg.lfs.enable {
+        PATH = cfg.lfs.contentDir;
+      };
+    };
+
+    services.postgresql = optionalAttrs (usePostgresql && cfg.database.createDatabase) {
+      enable = mkDefault true;
+
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        {
+          name = cfg.database.user;
+          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    services.mysql = optionalAttrs (useMysql && cfg.database.createDatabase) {
+      enable = mkDefault true;
+      package = mkDefault pkgs.mariadb;
+
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        {
+          name = cfg.database.user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/.ssh' 0700 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
+
+      # If we have a folder or symlink with Forgejo locales, remove it
+      # And symlink the current Forgejo locales in place
+      "L+ '${cfg.stateDir}/conf/locale' - - - - ${cfg.package.out}/locale"
+
+    ] ++ optionals cfg.lfs.enable [
+      "d '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.forgejo = {
+      description = "Forgejo (Beyond coding. We forge.)";
+      after = [
+        "network.target"
+      ] ++ optionals usePostgresql [
+        "postgresql.service"
+      ] ++ optionals useMysql [
+        "mysql.service"
+      ];
+      requires = optionals (cfg.database.createDatabase && usePostgresql) [
+        "postgresql.service"
+      ] ++ optionals (cfg.database.createDatabase && useMysql) [
+        "mysql.service"
+      ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ cfg.package pkgs.git pkgs.gnupg ];
+
+      # In older versions the secret naming for JWT was confusing:
+      # the file jwt_secret held the value for LFS_JWT_SECRET, while JWT_SECRET
+      # wasn't persisted at all.
+      # To fix that, there is now the file oauth2_jwt_secret containing the
+      # value for JWT_SECRET, and the file jwt_secret gets renamed to
+      # lfs_jwt_secret.
+      # We have to handle this to stay compatible with older installations.
+      preStart =
+        let
+          runConfig = "${cfg.customDir}/conf/app.ini";
+          secretKey = "${cfg.customDir}/conf/secret_key";
+          oauth2JwtSecret = "${cfg.customDir}/conf/oauth2_jwt_secret";
+          oldLfsJwtSecret = "${cfg.customDir}/conf/jwt_secret"; # old file for LFS_JWT_SECRET
+          lfsJwtSecret = "${cfg.customDir}/conf/lfs_jwt_secret"; # new file for LFS_JWT_SECRET
+          internalToken = "${cfg.customDir}/conf/internal_token";
+          replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
+        in
+        ''
+          # copy custom configuration and generate random secrets if needed
+          ${lib.optionalString (!cfg.useWizard) ''
+            function forgejo_setup {
+              cp -f '${format.generate "app.ini" cfg.settings}' '${runConfig}'
+
+              if [ ! -s '${secretKey}' ]; then
+                  ${exe} generate secret SECRET_KEY > '${secretKey}'
+              fi
+
+              # Migrate LFS_JWT_SECRET filename
+              if [[ -s '${oldLfsJwtSecret}' && ! -s '${lfsJwtSecret}' ]]; then
+                  mv '${oldLfsJwtSecret}' '${lfsJwtSecret}'
+              fi
+
+              if [ ! -s '${oauth2JwtSecret}' ]; then
+                  ${exe} generate secret JWT_SECRET > '${oauth2JwtSecret}'
+              fi
+
+              ${optionalString cfg.lfs.enable ''
+              if [ ! -s '${lfsJwtSecret}' ]; then
+                  ${exe} generate secret LFS_JWT_SECRET > '${lfsJwtSecret}'
+              fi
+              ''}
+
+              if [ ! -s '${internalToken}' ]; then
+                  ${exe} generate secret INTERNAL_TOKEN > '${internalToken}'
+              fi
+
+              chmod u+w '${runConfig}'
+              ${replaceSecretBin} '#secretkey#' '${secretKey}' '${runConfig}'
+              ${replaceSecretBin} '#oauth2jwtsecret#' '${oauth2JwtSecret}' '${runConfig}'
+              ${replaceSecretBin} '#internaltoken#' '${internalToken}' '${runConfig}'
+
+              ${optionalString cfg.lfs.enable ''
+                ${replaceSecretBin} '#lfsjwtsecret#' '${lfsJwtSecret}' '${runConfig}'
+              ''}
+
+              ${optionalString (cfg.database.passwordFile != null) ''
+                ${replaceSecretBin} '#dbpass#' '${cfg.database.passwordFile}' '${runConfig}'
+              ''}
+
+              ${optionalString (cfg.mailerPasswordFile != null) ''
+                ${replaceSecretBin} '#mailerpass#' '${cfg.mailerPasswordFile}' '${runConfig}'
+              ''}
+              chmod u-w '${runConfig}'
+            }
+            (umask 027; forgejo_setup)
+          ''}
+
+          # run migrations/init the database
+          ${exe} migrate
+
+          # update all hooks' binary paths
+          ${exe} admin regenerate hooks
+
+          # update command option in authorized_keys
+          if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
+          then
+            ${exe} admin regenerate keys
+          fi
+        '';
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.stateDir;
+        ExecStart = "${exe} web --pid /run/forgejo/forgejo.pid";
+        Restart = "always";
+        # Runtime directory and mode
+        RuntimeDirectory = "forgejo";
+        RuntimeDirectoryMode = "0755";
+        # Proc filesystem
+        ProcSubset = "pid";
+        ProtectProc = "invisible";
+        # Access write directories
+        ReadWritePaths = [ cfg.customDir cfg.dump.backupDir cfg.repositoryRoot cfg.stateDir cfg.lfs.contentDir ];
+        UMask = "0027";
+        # Capabilities
+        CapabilityBoundingSet = "";
+        # Security
+        NoNewPrivileges = true;
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @setuid" "setrlimit" ];
+      };
+
+      environment = {
+        USER = cfg.user;
+        HOME = cfg.stateDir;
+        # `GITEA_` prefix until https://codeberg.org/forgejo/forgejo/issues/497
+        # is resolved.
+        GITEA_WORK_DIR = cfg.stateDir;
+        GITEA_CUSTOM = cfg.customDir;
+      };
+    };
+
+    users.users = mkIf (cfg.user == "forgejo") {
+      forgejo = {
+        home = cfg.stateDir;
+        useDefaultShell = true;
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "forgejo") {
+      forgejo = { };
+    };
+
+    systemd.services.forgejo-dump = mkIf cfg.dump.enable {
+      description = "forgejo dump";
+      after = [ "forgejo.service" ];
+      path = [ cfg.package ];
+
+      environment = {
+        USER = cfg.user;
+        HOME = cfg.stateDir;
+        # `GITEA_` prefix until https://codeberg.org/forgejo/forgejo/issues/497
+        # is resolved.
+        GITEA_WORK_DIR = cfg.stateDir;
+        GITEA_CUSTOM = cfg.customDir;
+      };
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = cfg.user;
+        ExecStart = "${exe} dump --type ${cfg.dump.type}" + optionalString (cfg.dump.file != null) " --file ${cfg.dump.file}";
+        WorkingDirectory = cfg.dump.backupDir;
+      };
+    };
+
+    systemd.timers.forgejo-dump = mkIf cfg.dump.enable {
+      description = "Forgejo dump timer";
+      partOf = [ "forgejo-dump.service" ];
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = cfg.dump.interval;
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ bendlas emilylange ];
+}
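A minimal usage sketch for this new module (the domain is hypothetical): with `database.createDatabase` left at its default, the module provisions the matching PostgreSQL database and user automatically:

    services.forgejo = {
      enable = true;
      database.type = "postgres";
      settings.server = {
        DOMAIN = "git.example.com";
        ROOT_URL = "https://git.example.com/";
        HTTP_PORT = 3000;
      };
    };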
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index c5e38b498829..b399ccc38f58 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -1088,6 +1088,11 @@ in {
         ''Support for container registries other than gitlab-container-registry has ended since GitLab 16.0.0 and is scheduled for removal in a future release.
           Please back up your data and migrate to the gitlab-container-registry package.''
       )
+      (mkIf
+        (versionAtLeast (getVersion cfg.packages.gitlab) "16.2.0" && versionOlder (getVersion cfg.packages.gitlab) "16.5.0")
+        ''GitLab instances created or updated between versions [15.11.0, 15.11.2] have an incorrect database schema.
+        Check the upstream documentation for a workaround: https://docs.gitlab.com/ee/update/versions/gitlab_16_changes.html#undefined-column-error-upgrading-to-162-or-later''
+      )
     ];
 
     assertions = [
@@ -1655,7 +1660,7 @@ in {
         Restart = "on-failure";
         WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
         ExecStart = concatStringsSep " " [
-          "${cfg.packages.gitlab.rubyEnv}/bin/puma"
+          "${cfg.packages.gitlab.rubyEnv}/bin/bundle" "exec" "puma"
           "-e production"
           "-C ${cfg.statePath}/config/puma.rb"
           "-w ${cfg.puma.workers}"
diff --git a/nixos/modules/services/misc/jellyfin.nix b/nixos/modules/services/misc/jellyfin.nix
index 2a4483199d7d..43fdc09f4559 100644
--- a/nixos/modules/services/misc/jellyfin.nix
+++ b/nixos/modules/services/misc/jellyfin.nix
@@ -46,7 +46,8 @@ in
   config = mkIf cfg.enable {
     systemd.services.jellyfin = {
       description = "Jellyfin Media Server";
-      after = [ "network.target" ];
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
 
       # This is mostly follows: https://github.com/jellyfin/jellyfin/blob/master/fedora/jellyfin.service
diff --git a/nixos/modules/services/misc/mbpfan.nix b/nixos/modules/services/misc/mbpfan.nix
index e75c35254143..8f64fb2d9c52 100644
--- a/nixos/modules/services/misc/mbpfan.nix
+++ b/nixos/modules/services/misc/mbpfan.nix
@@ -26,7 +26,7 @@ in {
 
     aggressive = mkOption {
       type = types.bool;
-      default = false;
+      default = true;
       description = lib.mdDoc "If true, favors higher default fan speeds.";
     };
 
@@ -38,17 +38,20 @@ in {
 
         options.general.low_temp = mkOption {
           type = types.int;
-          default = 63;
+          default = (if cfg.aggressive then 55 else 63);
+          defaultText = literalExpression "55";
           description = lib.mdDoc "If temperature is below this, fans will run at minimum speed.";
         };
         options.general.high_temp = mkOption {
           type = types.int;
-          default = 66;
+          default = (if cfg.aggressive then 58 else 66);
+          defaultText = literalExpression "58";
           description = lib.mdDoc "If temperature is above this, fan speed will gradually increase.";
         };
         options.general.max_temp = mkOption {
           type = types.int;
-          default = 86;
+          default = (if cfg.aggressive then 78 else 86);
+          defaultText = literalExpression "78";
           description = lib.mdDoc "If temperature is above this, fans will run at maximum speed.";
         };
         options.general.polling_interval = mkOption {
@@ -70,13 +73,6 @@ in {
   ];
 
   config = mkIf cfg.enable {
-    services.mbpfan.settings = mkIf cfg.aggressive {
-      general.min_fan1_speed = mkDefault 2000;
-      general.low_temp = mkDefault 55;
-      general.high_temp = mkDefault 58;
-      general.max_temp = mkDefault 70;
-    };
-
     boot.kernelModules = [ "coretemp" "applesmc" ];
     environment.systemPackages = [ cfg.package ];
     environment.etc."mbpfan.conf".source = settingsFile;
@@ -86,6 +82,7 @@ in {
       wantedBy = [ "sysinit.target" ];
       after = [ "syslog.target" "sysinit.target" ];
       restartTriggers = [ config.environment.etc."mbpfan.conf".source ];
+
       serviceConfig = {
         Type = "simple";
         ExecStart = "${cfg.package}/bin/mbpfan -f${verbose}";
diff --git a/nixos/modules/services/misc/paperless.nix b/nixos/modules/services/misc/paperless.nix
index 0683a1f922ab..74a3b49ac9a6 100644
--- a/nixos/modules/services/misc/paperless.nix
+++ b/nixos/modules/services/misc/paperless.nix
@@ -43,6 +43,8 @@ let
       "-/etc/nsswitch.conf"
       "-/etc/hosts"
       "-/etc/localtime"
+      "-/etc/ssl/certs"
+      "-/etc/static/ssl/certs"
       "-/run/postgresql"
     ] ++ (optional enableRedis redisServer.unixSocket);
     BindPaths = [
diff --git a/nixos/modules/services/monitoring/below.nix b/nixos/modules/services/monitoring/below.nix
index 92ee3882cac8..4a7135162ac4 100644
--- a/nixos/modules/services/monitoring/below.nix
+++ b/nixos/modules/services/monitoring/below.nix
@@ -103,4 +103,6 @@ in {
       };
     };
   };
+
+  meta.maintainers = with lib.maintainers; [ nicoo ];
 }
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index 571b9a3aeebd..e90a0e9d16db 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -88,26 +88,6 @@ let
   # Get a submodule without any embedded metadata:
   _filter = x: filterAttrs (k: v: k != "_module") x;
 
-  # FIXME(@Ma27) remove before 23.05. This is just a helper-type
-  # because `mkRenamedOptionModule` doesn't work if `foo.bar` is renamed
-  # to `foo.bar.baz`.
-  submodule' = module: types.coercedTo
-    (mkOptionType {
-      name = "grafana-provision-submodule";
-      description = "Wrapper-type for backwards compat of Grafana's declarative provisioning";
-      check = x:
-        if builtins.isList x then
-          throw ''
-            Provisioning dashboards and datasources declaratively by
-            setting `dashboards` or `datasources` to a list is not supported
-            anymore. Use `services.grafana.provision.datasources.settings.datasources`
-            (or `services.grafana.provision.dashboards.settings.providers`) instead.
-          ''
-        else isAttrs x || isFunction x;
-    })
-    id
-    (types.submodule module);
-
   # http://docs.grafana.org/administration/provisioning/#datasources
   grafanaTypes.datasourceConfig = types.submodule {
     freeformType = provisioningSettingsFormat.type;
@@ -1160,7 +1140,7 @@ in
           Declaratively provision Grafana's datasources.
         '';
         default = { };
-        type = submodule' {
+        type = types.submodule {
           options.settings = mkOption {
             description = lib.mdDoc ''
               Grafana datasource configuration in Nix. Can't be used with
@@ -1235,7 +1215,7 @@ in
           Declaratively provision Grafana's dashboards.
         '';
         default = { };
-        type = submodule' {
+        type = types.submodule {
           options.settings = mkOption {
             description = lib.mdDoc ''
               Grafana dashboard configuration in Nix. Can't be used with
diff --git a/nixos/modules/services/monitoring/mimir.nix b/nixos/modules/services/monitoring/mimir.nix
index edca9b7be4ff..6ed139b22974 100644
--- a/nixos/modules/services/monitoring/mimir.nix
+++ b/nixos/modules/services/monitoring/mimir.nix
@@ -32,11 +32,21 @@ in {
       type = types.package;
       description = lib.mdDoc ''Mimir package to use.'';
     };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "--config.expand-env=true" ];
+      description = lib.mdDoc ''
+        Specify a list of additional command line flags,
+        which get escaped and are then passed to Mimir.
+      '';
+    };
   };
 
   config = mkIf cfg.enable {
     # for mimirtool
-    environment.systemPackages = [ pkgs.mimir ];
+    environment.systemPackages = [ cfg.package ];
 
     assertions = [{
       assertion = (
@@ -60,7 +70,7 @@ in {
                else cfg.configFile;
       in
       {
-        ExecStart = "${cfg.package}/bin/mimir --config.file=${conf}";
+        ExecStart = "${cfg.package}/bin/mimir --config.file=${conf} ${escapeShellArgs cfg.extraFlags}";
         DynamicUser = true;
         Restart = "always";
         ProtectSystem = "full";
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index f5b97c51186a..66aff30b5ed1 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -50,6 +50,7 @@ let
     "mikrotik"
     "minio"
     "modemmanager"
+    "mysqld"
     "nextcloud"
     "nginx"
     "nginxlog"
@@ -297,6 +298,20 @@ in
           or 'services.prometheus.exporters.mail.configFile'.
       '';
     } {
+      assertion = cfg.mysqld.runAsLocalSuperUser -> config.services.mysql.enable;
+      message = ''
+        The exporter is configured to run as 'services.mysql.user', but
+          'services.mysql.enable' is set to false.
+      '';
+    } {
+      assertion = cfg.nextcloud.enable -> (
+        (cfg.nextcloud.passwordFile == null) != (cfg.nextcloud.tokenFile == null)
+      );
+      message = ''
+        Please specify either 'services.prometheus.exporters.nextcloud.passwordFile' or
+          'services.prometheus.exporters.nextcloud.tokenFile'
+      '';
+    } {
       assertion = cfg.sql.enable -> (
         (cfg.sql.configFile == null) != (cfg.sql.configuration == null)
       );
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix b/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
index 66eaed51d2ea..407bff1d62de 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
@@ -21,7 +21,7 @@ let
       throw
       "${logPrefix}: configuration file must not reside within /tmp - it won't be visible to the systemd service."
     else
-      true;
+      file;
   checkConfig = file:
     pkgs.runCommand "checked-blackbox-exporter.conf" {
       preferLocalBuild = true;
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix b/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix
new file mode 100644
index 000000000000..849c514de681
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, options }:
+let
+  cfg = config.services.prometheus.exporters.mysqld;
+  inherit (lib) types mkOption mdDoc mkIf mkForce cli concatStringsSep optionalString escapeShellArgs;
+in {
+  port = 9104;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+
+    runAsLocalSuperUser = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Whether to run the exporter as {option}`services.mysql.user`.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.path;
+      example = "/var/lib/prometheus-mysqld-exporter.cnf";
+      description = mdDoc ''
+        Path to the exporter's my.cnf-style config file.
+
+        See <https://github.com/prometheus/mysqld_exporter#running> for more information about
+        the available options.
+
+        ::: {.warning}
+        Please do not store this file in the nix store if you choose to include any credentials here,
+        as it would be world-readable.
+        :::
+      '';
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = !cfg.runAsLocalSuperUser;
+      User = mkIf cfg.runAsLocalSuperUser (mkForce config.services.mysql.user);
+      LoadCredential = mkIf (cfg.configFile != null) (mkForce ("config:" + cfg.configFile));
+      ExecStart = concatStringsSep " " [
+        "${pkgs.prometheus-mysqld-exporter}/bin/mysqld_exporter"
+        "--web.listen-address=${cfg.listenAddress}:${toString cfg.port}"
+        "--web.telemetry-path=${cfg.telemetryPath}"
+        (optionalString (cfg.configFile != null) ''--config.my-cnf=''${CREDENTIALS_DIRECTORY}/config'')
+        (escapeShellArgs cfg.extraFlags)
+      ];
+      RestrictAddressFamilies = [
+        # The exporter can be configured to talk to a local mysql server via a unix socket.
+        "AF_UNIX"
+      ];
+    };
+  };
+}
+
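A minimal usage sketch for the new exporter (the config path is hypothetical): with `runAsLocalSuperUser` the unit runs as `services.mysql.user` instead of a DynamicUser, which also satisfies the assertion added in exporters.nix above:

    services.prometheus.exporters.mysqld = {
      enable = true;
      runAsLocalSuperUser = true;   # requires services.mysql.enable = true
      configFile = "/var/lib/prometheus-mysqld-exporter.cnf";
    };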
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix b/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
index 7808c8861a76..28a3eb6a134c 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
@@ -23,16 +23,27 @@ in
       description = lib.mdDoc ''
         Username for connecting to Nextcloud.
         Note that this account needs to have admin privileges in Nextcloud.
+        Unused when using token authentication.
       '';
     };
     passwordFile = mkOption {
-      type = types.path;
+      type = types.nullOr types.path;
+      default = null;
       example = "/path/to/password-file";
       description = lib.mdDoc ''
         File containing the password for connecting to Nextcloud.
         Make sure that this file is readable by the exporter user.
       '';
     };
+    tokenFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/path/to/token-file";
+      description = lib.mdDoc ''
+        File containing the token for connecting to Nextcloud.
+        Make sure that this file is readable by the exporter user.
+      '';
+    };
     timeout = mkOption {
       type = types.str;
       default = "5s";
@@ -47,12 +58,15 @@ in
       ExecStart = ''
         ${pkgs.prometheus-nextcloud-exporter}/bin/nextcloud-exporter \
           --addr ${cfg.listenAddress}:${toString cfg.port} \
-          --username ${cfg.username} \
           --timeout ${cfg.timeout} \
           --server ${cfg.url} \
-          --password ${escapeShellArg "@${cfg.passwordFile}"} \
-          ${concatStringsSep " \\\n  " cfg.extraFlags}
-      '';
+          ${if cfg.passwordFile != null then ''
+            --username ${cfg.username} \
+            --password ${escapeShellArg "@${cfg.passwordFile}"} \
+          '' else ''
+            --auth-token ${escapeShellArg "@${cfg.tokenFile}"} \
+          ''} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}'';
     };
   };
 }
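A brief sketch of the new token-based mode (URL and token path are hypothetical); per the assertion added in exporters.nix, exactly one of `passwordFile` or `tokenFile` may be set:

    services.prometheus.exporters.nextcloud = {
      enable = true;
      url = "https://cloud.example.com";
      tokenFile = "/run/secrets/nextcloud-serverinfo-token";
    };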
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix b/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix
index f52d92a73d5d..f2336429d42f 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix
@@ -1,4 +1,8 @@
-{ config, lib, pkgs, options }:
+{ config
+, lib
+, pkgs
+, options
+}:
 
 with lib;
 
@@ -6,17 +10,14 @@ let
   cfg = config.services.prometheus.exporters.unbound;
 in
 {
+  imports = [
+    (mkRemovedOptionModule [ "controlInterface" ] "This option was removed, use the `unbound.host` option instead.")
+    (mkRemovedOptionModule [ "fetchType" ] "This option was removed, use the `unbound.host` option instead.")
+    ({ options.warnings = options.warnings; options.assertions = options.assertions; })
+  ];
+
   port = 9167;
   extraOpts = {
-    fetchType = mkOption {
-      # TODO: add shm when upstream implemented it
-      type = types.enum [ "tcp" "uds" ];
-      default = "uds";
-      description = lib.mdDoc ''
-        Which methods the exporter uses to get the information from unbound.
-      '';
-    };
-
     telemetryPath = mkOption {
       type = types.str;
       default = "/metrics";
@@ -25,34 +26,65 @@ in
       '';
     };
 
-    controlInterface = mkOption {
-      type = types.nullOr types.str;
-      default = null;
-      example = "/run/unbound/unbound.socket";
-      description = lib.mdDoc ''
-        Path to the unbound socket for uds mode or the control interface port for tcp mode.
+    unbound = {
+      ca = mkOption {
+        type = types.nullOr types.path;
+        default = "/var/lib/unbound/unbound_server.pem";
+        example = null;
+        description = ''
+          Path to the Unbound server certificate authority
+        '';
+      };
 
-        Example:
-          uds-mode: /run/unbound/unbound.socket
-          tcp-mode: 127.0.0.1:8953
-      '';
+      certificate = mkOption {
+        type = types.nullOr types.path;
+        default = "/var/lib/unbound/unbound_control.pem";
+        example = null;
+        description = ''
+          Path to the Unbound control socket certificate
+        '';
+      };
+
+      key = mkOption {
+        type = types.nullOr types.path;
+        default = "/var/lib/unbound/unbound_control.key";
+        example = null;
+        description = ''
+          Path to the Unbound control socket key.
+        '';
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "tcp://127.0.0.1:8953";
+        example = "unix:///run/unbound/unbound.socket";
+        description = lib.mdDoc ''
+          The address of the unbound control interface. Supports the TCP interface (e.g. `tcp://127.0.0.1:8953`) as well as unix domain sockets (e.g. `unix:///run/unbound/unbound.socket`).
+        '';
+      };
     };
   };
 
   serviceOpts = mkMerge ([{
     serviceConfig = {
+      User = "unbound"; # to access the unbound_control.key
       ExecStart = ''
-        ${pkgs.prometheus-unbound-exporter}/bin/unbound-telemetry \
-          ${cfg.fetchType} \
-          --bind ${cfg.listenAddress}:${toString cfg.port} \
-          --path ${cfg.telemetryPath} \
-          ${optionalString (cfg.controlInterface != null) "--control-interface ${cfg.controlInterface}"} \
+        ${pkgs.prometheus-unbound-exporter}/bin/unbound_exporter \
+          --unbound.host "${cfg.unbound.host}" \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          ${optionalString (cfg.unbound.ca != null) "--unbound.ca ${cfg.unbound.ca}"} \
+          ${optionalString (cfg.unbound.certificate != null) "--unbound.cert ${cfg.unbound.certificate}"} \
+          ${optionalString (cfg.unbound.key != null) "--unbound.key ${cfg.unbound.key}"} \
           ${toString cfg.extraFlags}
       '';
       RestrictAddressFamilies = [
-        # Need AF_UNIX to collect data
         "AF_UNIX"
+        "AF_INET"
+        "AF_INET6"
       ];
+    } // optionalAttrs (!config.services.unbound.enable) {
+      DynamicUser = true;
     };
   }] ++ [
     (mkIf config.services.unbound.enable {
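A brief sketch of the reworked options: the default `tcp://127.0.0.1:8953` together with the certificate/key paths matches a stock unbound-control setup, while a unix socket can be selected via `unbound.host`:

    services.prometheus.exporters.unbound = {
      enable = true;
      unbound.host = "unix:///run/unbound/unbound.socket";
    };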
diff --git a/nixos/modules/services/monitoring/vmagent.nix b/nixos/modules/services/monitoring/vmagent.nix
index c793bb073199..0e2ffb31c57c 100644
--- a/nixos/modules/services/monitoring/vmagent.nix
+++ b/nixos/modules/services/monitoring/vmagent.nix
@@ -62,6 +62,16 @@ in {
         Whether to open the firewall for the default ports.
       '';
     };
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra args to pass to `vmagent`. See the docs:
+        <https://docs.victoriametrics.com/vmagent.html#advanced-usage>
+        or {command}`vmagent -help` for more information.
+      '';
+    };
   };
 
   config = mkIf cfg.enable {
@@ -90,7 +100,7 @@ in {
         Type = "simple";
         Restart = "on-failure";
         WorkingDirectory = cfg.dataDir;
-        ExecStart = "${cfg.package}/bin/vmagent -remoteWrite.url=${cfg.remoteWriteUrl} -promscrape.config=${prometheusConfig}";
+        ExecStart = "${cfg.package}/bin/vmagent -remoteWrite.url=${cfg.remoteWriteUrl} -promscrape.config=${prometheusConfig} ${escapeShellArgs cfg.extraArgs}";
       };
     };
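A brief sketch of the new `extraArgs` option (the remote-write URL is hypothetical); the arguments are shell-escaped and appended to the generated ExecStart line:

    services.vmagent = {
      enable = true;
      remoteWriteUrl = "http://victoria.example.com:8428/api/v1/write";
      extraArgs = [ "-memory.allowedPercent=60" ];
    };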
 
diff --git a/nixos/modules/services/networking/adguardhome.nix b/nixos/modules/services/networking/adguardhome.nix
index 1701e5b439c1..399d838ccc69 100644
--- a/nixos/modules/services/networking/adguardhome.nix
+++ b/nixos/modules/services/networking/adguardhome.nix
@@ -17,6 +17,7 @@ let
     text = builtins.toJSON cfg.settings;
     checkPhase = "${pkgs.adguardhome}/bin/adguardhome -c $out --check-config";
   };
+  defaultBindPort = 3000;
 
 in
 {
@@ -86,7 +87,7 @@ in
             '';
           };
           bind_port = mkOption {
-            default = 3000;
+            default = defaultBindPort;
             type = port;
             description = lib.mdDoc ''
               Port to serve HTTP pages on.
@@ -169,6 +170,6 @@ in
       };
     };
 
-    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.settings.bind_port ];
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.settings.bind_port or defaultBindPort ];
   };
 }
diff --git a/nixos/modules/services/networking/dae.nix b/nixos/modules/services/networking/dae.nix
index 231c555b3303..42ed3c7f8d4a 100644
--- a/nixos/modules/services/networking/dae.nix
+++ b/nixos/modules/services/networking/dae.nix
@@ -1,41 +1,161 @@
-{ config, pkgs, lib, ... }:
+{ config, lib, pkgs, ... }:
+
 let
   cfg = config.services.dae;
+  assets = cfg.assets;
+  genAssetsDrv = paths: pkgs.symlinkJoin {
+    name = "dae-assets";
+    inherit paths;
+  };
 in
 {
-  meta.maintainers = with lib.maintainers; [ pokon548 ];
+  meta.maintainers = with lib.maintainers; [ pokon548 oluceps ];
 
   options = {
-    services.dae = {
-      enable = lib.options.mkEnableOption (lib.mdDoc "the dae service");
-      package = lib.mkPackageOptionMD pkgs "dae" { };
+    services.dae = with lib;{
+      enable = mkEnableOption
+        (mdDoc "A Linux high-performance transparent proxy solution based on eBPF");
+
+      package = mkPackageOptionMD pkgs "dae" { };
+
+      assets = mkOption {
+        type = with types;(listOf path);
+        default = with pkgs; [ v2ray-geoip v2ray-domain-list-community ];
+        defaultText = literalExpression "with pkgs; [ v2ray-geoip v2ray-domain-list-community ]";
+        description = mdDoc ''
+          Assets required to run dae.
+        '';
+      };
+
+      assetsPath = mkOption {
+        type = types.str;
+        default = "${genAssetsDrv assets}/share/v2ray";
+        defaultText = literalExpression ''
+          (symlinkJoin {
+              name = "dae-assets";
+              paths = assets;
+          })/share/v2ray
+        '';
+        description = mdDoc ''
+          The path that contains the geolocation database.
+          This option overrides `assets`.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = with types; submodule {
+          options = {
+            enable = mkEnableOption "enable";
+            port = mkOption {
+              type = types.int;
+              description = ''
+                Port to be opened. Must be consistent with the `tproxy_port` field in the config file.
+              '';
+            };
+          };
+        };
+        default = {
+          enable = true;
+          port = 12345;
+        };
+        defaultText = literalExpression ''
+          {
+            enable = true;
+            port = 12345;
+          }
+        '';
+        description = mdDoc ''
+          Open the firewall port.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        default = "/etc/dae/config.dae";
+        example = "/path/to/your/config.dae";
+        description = mdDoc ''
+          The path of the dae config file; it must end with `.dae`.
+        '';
+      };
+
+      config = mkOption {
+        type = types.str;
+        default = ''
+          global{}
+          routing{}
+        '';
+        description = mdDoc ''
+          Config text for dae.
+
+          See <https://github.com/daeuniverse/dae/blob/main/example.dae>.
+        '';
+      };
+
+      disableTxChecksumIpGeneric =
+        mkEnableOption (mdDoc "See <https://github.com/daeuniverse/dae/issues/43>");
+
     };
   };
 
-  config = lib.mkIf config.services.dae.enable {
-    networking.firewall.allowedTCPPorts = [ 12345 ];
-    networking.firewall.allowedUDPPorts = [ 12345 ];
+  config = lib.mkIf cfg.enable
+
+    {
+      environment.systemPackages = [ cfg.package ];
+      systemd.packages = [ cfg.package ];
 
-    systemd.services.dae = {
-      unitConfig = {
-        Description = "dae Service";
-        Documentation = "https://github.com/daeuniverse/dae";
-        After = [ "network-online.target" "systemd-sysctl.service" ];
-        Wants = [ "network-online.target" ];
+      environment.etc."dae/config.dae" = {
+        mode = "0400";
+        source = pkgs.writeText "config.dae" cfg.config;
       };
 
-      serviceConfig = {
-        User = "root";
-        ExecStartPre = "${lib.getExe cfg.package} validate -c /etc/dae/config.dae";
-        ExecStart = "${lib.getExe cfg.package} run --disable-timestamp -c /etc/dae/config.dae";
-        ExecReload = "${lib.getExe cfg.package} reload $MAINPID";
-        LimitNPROC = 512;
-        LimitNOFILE = 1048576;
-        Restart = "on-abnormal";
-        Type = "notify";
+      networking = lib.mkIf cfg.openFirewall.enable {
+        firewall =
+          let portToOpen = cfg.openFirewall.port;
+          in
+          {
+            allowedTCPPorts = [ portToOpen ];
+            allowedUDPPorts = [ portToOpen ];
+          };
       };
 
-      wantedBy = [ "multi-user.target" ];
+      systemd.services.dae =
+        let
+          daeBin = lib.getExe cfg.package;
+          TxChecksumIpGenericWorkaround = with lib;(getExe pkgs.writeShellApplication {
+            name = "disable-tx-checksum-ip-generic";
+            text = with pkgs; ''
+              iface=$(${iproute2}/bin/ip route | ${lib.getExe gawk} '/default/ {print $5}')
+              ${lib.getExe ethtool} -K "$iface" tx-checksum-ip-generic off
+            '';
+          });
+        in
+        {
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            ExecStartPre = [ "" "${daeBin} validate -c ${cfg.configFile}" ]
+              ++ (with lib; optional cfg.disableTxChecksumIpGeneric TxChecksumIpGenericWorkaround);
+            ExecStart = [ "" "${daeBin} run --disable-timestamp -c ${cfg.configFile}" ];
+            Environment = "DAE_LOCATION_ASSET=${cfg.assetsPath}";
+          };
+        };
+
+      assertions = [
+        {
+          assertion = lib.pathExists (toString (genAssetsDrv cfg.assets) + "/share/v2ray");
+          message = ''
+            The packages in `assets` do not provide the expected preset paths.
+            Please set `assetsPath` instead.
+          '';
+        }
+
+        {
+          assertion = !((config.services.dae.config != "global{}\nrouting{}\n")
+            && (config.services.dae.configFile != "/etc/dae/config.dae"));
+          message = ''
+            The options `config` and `configFile` cannot be set
+            at the same time.
+          '';
+        }
+      ];
     };
-  };
 }
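A hedged usage sketch for the reworked dae module; the inline config text is only a placeholder, not a validated dae ruleset, and the firewall port must stay consistent with tproxy_port as the option description notes:

    {
      services.dae = {
        enable = true;
        openFirewall = { enable = true; port = 12345; };
        # set either `config` (inline text) or `configFile`, not both,
        # per the assertion above
        config = ''
          global{}
          routing{}
        '';
      };
    }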
diff --git a/nixos/modules/services/networking/dnscrypt-proxy2.nix b/nixos/modules/services/networking/dnscrypt-proxy2.nix
index de1ca0d2f206..4592a0c2f6b3 100644
--- a/nixos/modules/services/networking/dnscrypt-proxy2.nix
+++ b/nixos/modules/services/networking/dnscrypt-proxy2.nix
@@ -11,7 +11,7 @@ in
     settings = mkOption {
       description = lib.mdDoc ''
         Attrset that is converted and passed as TOML config file.
-        For available params, see: <https://github.com/DNSCrypt/dnscrypt-proxy/blob/${pkgs.dnscrypt-proxy2.version}/dnscrypt-proxy/example-dnscrypt-proxy.toml>
+        For available params, see: <https://github.com/DNSCrypt/dnscrypt-proxy/blob/${pkgs.dnscrypt-proxy.version}/dnscrypt-proxy/example-dnscrypt-proxy.toml>
       '';
       example = literalExpression ''
         {
@@ -49,7 +49,7 @@ in
         passAsFile = [ "json" ];
       } ''
         ${if cfg.upstreamDefaults then ''
-          ${pkgs.remarshal}/bin/toml2json ${pkgs.dnscrypt-proxy2.src}/dnscrypt-proxy/example-dnscrypt-proxy.toml > example.json
+          ${pkgs.remarshal}/bin/toml2json ${pkgs.dnscrypt-proxy.src}/dnscrypt-proxy/example-dnscrypt-proxy.toml > example.json
           ${pkgs.jq}/bin/jq --slurp add example.json $jsonPath > config.json # merges the two
         '' else ''
           cp $jsonPath config.json
@@ -80,7 +80,7 @@ in
         AmbientCapabilities = "CAP_NET_BIND_SERVICE";
         CacheDirectory = "dnscrypt-proxy";
         DynamicUser = true;
-        ExecStart = "${pkgs.dnscrypt-proxy2}/bin/dnscrypt-proxy -config ${cfg.configFile}";
+        ExecStart = "${pkgs.dnscrypt-proxy}/bin/dnscrypt-proxy -config ${cfg.configFile}";
         LockPersonality = true;
         LogsDirectory = "dnscrypt-proxy";
         MemoryDenyWriteExecute = true;
diff --git a/nixos/modules/services/networking/firewall-nftables.nix b/nixos/modules/services/networking/firewall-nftables.nix
index 452dd97d89d2..7c7136cc96f1 100644
--- a/nixos/modules/services/networking/firewall-nftables.nix
+++ b/nixos/modules/services/networking/firewall-nftables.nix
@@ -70,10 +70,8 @@ in
       }
     ];
 
-    networking.nftables.ruleset = ''
-
-      table inet nixos-fw {
-
+    networking.nftables.tables."nixos-fw".family = "inet";
+    networking.nftables.tables."nixos-fw".content = ''
         ${optionalString (cfg.checkReversePath != false) ''
           chain rpfilter {
             type filter hook prerouting priority mangle + 10; policy drop;
@@ -169,9 +167,6 @@ in
 
           }
         ''}
-
-      }
-
     '';
 
   };
diff --git a/nixos/modules/services/networking/frp.nix b/nixos/modules/services/networking/frp.nix
new file mode 100644
index 000000000000..e4f9a220b5e8
--- /dev/null
+++ b/nixos/modules/services/networking/frp.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.frp;
+  settingsFormat = pkgs.formats.ini { };
+  configFile = settingsFormat.generate "frp.ini" cfg.settings;
+  isClient = (cfg.role == "client");
+  isServer = (cfg.role == "server");
+in
+{
+  options = {
+    services.frp = {
+      enable = mkEnableOption (mdDoc "frp");
+
+      package = mkPackageOptionMD pkgs "frp" { };
+
+      role = mkOption {
+        type = types.enum [ "server" "client" ];
+        description = mdDoc ''
+          frp consists of a `client` and a `server`. The server is usually
+          deployed on a machine with a public IP address, and
+          the client is usually deployed on the machine
+          hosting the internal (intranet) service to be exposed.
+        '';
+      };
+
+      settings = mkOption {
+        type = settingsFormat.type;
+        default = { };
+        description = mdDoc ''
+          Frp configuration, for configuration options
+          see the example of [client](https://github.com/fatedier/frp/blob/dev/conf/frpc_legacy_full.ini)
+          or [server](https://github.com/fatedier/frp/blob/dev/conf/frps_legacy_full.ini) on github.
+        '';
+        example = literalExpression ''
+          {
+            common = {
+              server_addr = "x.x.x.x";
+              server_port = 7000;
+            };
+          }
+        '';
+      };
+    };
+  };
+
+  config =
+    let
+      serviceCapability = optionals isServer [ "CAP_NET_BIND_SERVICE" ];
+      executableFile = if isClient then "frpc" else "frps";
+    in
+    mkIf cfg.enable {
+      systemd.services = {
+        frp = {
+          wants = optionals isClient [ "network-online.target" ];
+          after = if isClient then [ "network-online.target" ] else [ "network.target" ];
+          wantedBy = [ "multi-user.target" ];
+          description = "A fast reverse proxy frp ${cfg.role}";
+          serviceConfig = {
+            Type = "simple";
+            Restart = "on-failure";
+            RestartSec = 15;
+            ExecStart = "${cfg.package}/bin/${executableFile} -c ${configFile}";
+            StateDirectoryMode = optionalString isServer "0700";
+            DynamicUser = true;
+            # Hardening
+            UMask = optionalString isServer "0007";
+            CapabilityBoundingSet = serviceCapability;
+            AmbientCapabilities = serviceCapability;
+            PrivateDevices = true;
+            ProtectHostname = true;
+            ProtectClock = true;
+            ProtectKernelTunables = true;
+            ProtectKernelModules = true;
+            ProtectKernelLogs = true;
+            ProtectControlGroups = true;
+            RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ] ++ optionals isClient [ "AF_UNIX" ];
+            LockPersonality = true;
+            MemoryDenyWriteExecute = true;
+            RestrictRealtime = true;
+            RestrictSUIDSGID = true;
+            PrivateMounts = true;
+            SystemCallArchitectures = "native";
+            SystemCallFilter = [ "@system-service" ];
+          };
+        };
+      };
+    };
+
+  meta.maintainers = with maintainers; [ zaldnoay ];
+}
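An illustrative client configuration for the new module, assuming frp's legacy INI option names (server_addr, server_port, and one section per proxied service); the addresses and ports are placeholders:

    {
      services.frp = {
        enable = true;
        role = "client";
        settings = {
          common = {
            server_addr = "203.0.113.1";
            server_port = 7000;
          };
          ssh = {
            type = "tcp";
            local_ip = "127.0.0.1";
            local_port = 22;
            remote_port = 6000;
          };
        };
      };
    }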
diff --git a/nixos/modules/services/networking/frr.nix b/nixos/modules/services/networking/frr.nix
index d350fe3548ae..8488a4e4ef48 100644
--- a/nixos/modules/services/networking/frr.nix
+++ b/nixos/modules/services/networking/frr.nix
@@ -23,6 +23,7 @@ let
     "pbr"
     "bfd"
     "fabric"
+    "mgmt"
   ];
 
   allServices = services ++ [ "zebra" ];
diff --git a/nixos/modules/services/networking/jool.nix b/nixos/modules/services/networking/jool.nix
index 3aafbe40967c..d2d2b0956e8a 100644
--- a/nixos/modules/services/networking/jool.nix
+++ b/nixos/modules/services/networking/jool.nix
@@ -16,7 +16,7 @@ let
     TemporaryFileSystem = [ "/" ];
     BindReadOnlyPaths = [
       builtins.storeDir
-      "/run/current-system/kernel-modules"
+      "/run/booted-system/kernel-modules"
     ];
 
     # Give capabilities to load the module and configure it
@@ -31,26 +31,96 @@ let
 
   configFormat = pkgs.formats.json {};
 
-  mkDefaultAttrs = lib.mapAttrs (n: v: lib.mkDefault v);
+  # Generate the config file of instance `name`
+  nat64Conf = name:
+    configFormat.generate "jool-nat64-${name}.conf"
+      (cfg.nat64.${name} // { instance = name; });
+  siitConf = name:
+    configFormat.generate "jool-siit-${name}.conf"
+      (cfg.siit.${name} // { instance = name; });
+
+  # NAT64 config type
+  nat64Options = lib.types.submodule {
+    # The format is plain JSON
+    freeformType = configFormat.type;
+    # Some options with a default value
+    options.framework = lib.mkOption {
+      type = lib.types.enum [ "netfilter" "iptables" ];
+      default = "netfilter";
+      description = lib.mdDoc ''
+        The framework to use for attaching Jool's translation to the existing
+        kernel packet processing rules. See the
+        [documentation](https://nicmx.github.io/Jool/en/intro-jool.html#design)
+        for the differences between the two options.
+      '';
+    };
+    options.global.pool6 = lib.mkOption {
+      type = lib.types.strMatching "[[:xdigit:]:]+/[[:digit:]]+"
+        // { description = "Network prefix in CIDR notation"; };
+      default = "64:ff9b::/96";
+      description = lib.mdDoc ''
+        The prefix used for embedding IPv4 into IPv6 addresses.
+        Defaults to the well-known NAT64 prefix, defined by
+        [RFC 6052](https://datatracker.ietf.org/doc/html/rfc6052).
+      '';
+    };
+  };
 
-  defaultNat64 = {
-    instance = "default";
-    framework = "netfilter";
-    global.pool6 = "64:ff9b::/96";
+  # SIIT config type
+  siitOptions = lib.types.submodule {
+    # The format is, again, plain JSON
+    freeformType = configFormat.type;
+    # Some options with a default value
+    options = { inherit (nat64Options.getSubOptions []) framework; };
+  };
+
+  makeNat64Unit = name: opts: {
+    "jool-nat64-${name}" = {
+      description = "Jool, NAT64 setup of instance ${name}";
+      documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStartPre = "${pkgs.kmod}/bin/modprobe jool";
+        ExecStart    = "${jool-cli}/bin/jool file handle ${nat64Conf name}";
+        ExecStop     = "${jool-cli}/bin/jool -f ${nat64Conf name} instance remove";
+      } // hardening;
+    };
   };
-  defaultSiit = {
-    instance = "default";
-    framework = "netfilter";
+
+  makeSiitUnit = name: opts: {
+    "jool-siit-${name}" = {
+      description = "Jool, SIIT setup of instance ${name}";
+      documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStartPre = "${pkgs.kmod}/bin/modprobe jool_siit";
+        ExecStart    = "${jool-cli}/bin/jool_siit file handle ${siitConf name}";
+        ExecStop     = "${jool-cli}/bin/jool_siit -f ${siitConf name} instance remove";
+      } // hardening;
+    };
   };
 
-  nat64Conf = configFormat.generate "jool-nat64.conf" cfg.nat64.config;
-  siitConf  = configFormat.generate "jool-siit.conf" cfg.siit.config;
+  checkNat64 = name: _: ''
+    printf 'Validating Jool configuration for NAT64 instance "${name}"... '
+    jool file check ${nat64Conf name}
+    printf 'Ok.\n'; touch "$out"
+  '';
+
+  checkSiit = name: _: ''
+    printf 'Validating Jool configuration for SIIT instance "${name}"... '
+    jool_siit file check ${siitConf name}
+    printf 'Ok.\n'; touch "$out"
+  '';
 
 in
 
 {
-  ###### interface
-
   options = {
     networking.jool.enable = lib.mkOption {
       type = lib.types.bool;
@@ -64,157 +134,146 @@ in
         NAT64, analogous to the IPv4 NAPT. Refer to the upstream
         [documentation](https://nicmx.github.io/Jool/en/intro-xlat.html) for
         the supported modes of translation and how to configure them.
+
+        Enabling this option will install the Jool kernel module and the
+        command line tools for controlling it.
       '';
     };
 
-    networking.jool.nat64.enable = lib.mkEnableOption (lib.mdDoc "a NAT64 instance of Jool.");
-    networking.jool.nat64.config = lib.mkOption {
-      type = configFormat.type;
-      default = defaultNat64;
+    networking.jool.nat64 = lib.mkOption {
+      type = lib.types.attrsOf nat64Options;
+      default = { };
       example = lib.literalExpression ''
         {
-          # custom NAT64 prefix
-          global.pool6 = "2001:db8:64::/96";
-
-          # Port forwarding
-          bib = [
-            { # SSH 192.0.2.16 → 2001:db8:a::1
-              "protocol"     = "TCP";
-              "ipv4 address" = "192.0.2.16#22";
-              "ipv6 address" = "2001:db8:a::1#22";
-            }
-            { # DNS (TCP) 192.0.2.16 → 2001:db8:a::2
-              "protocol"     = "TCP";
-              "ipv4 address" = "192.0.2.16#53";
-              "ipv6 address" = "2001:db8:a::2#53";
-            }
-            { # DNS (UDP) 192.0.2.16 → 2001:db8:a::2
-              "protocol" = "UDP";
-              "ipv4 address" = "192.0.2.16#53";
-              "ipv6 address" = "2001:db8:a::2#53";
-            }
-          ];
-
-          pool4 = [
-            # Ports for dynamic translation
-            { protocol =  "TCP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
-            { protocol =  "UDP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
-            { protocol = "ICMP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
-
-            # Ports for static BIB entries
-            { protocol =  "TCP";  prefix = "192.0.2.16/32"; "port range" = "22"; }
-            { protocol =  "UDP";  prefix = "192.0.2.16/32"; "port range" = "53"; }
-          ];
+          default = {
+            # custom NAT64 prefix
+            global.pool6 = "2001:db8:64::/96";
+
+            # Port forwarding
+            bib = [
+              { # SSH 192.0.2.16 → 2001:db8:a::1
+                "protocol"     = "TCP";
+                "ipv4 address" = "192.0.2.16#22";
+                "ipv6 address" = "2001:db8:a::1#22";
+              }
+              { # DNS (TCP) 192.0.2.16 → 2001:db8:a::2
+                "protocol"     = "TCP";
+                "ipv4 address" = "192.0.2.16#53";
+                "ipv6 address" = "2001:db8:a::2#53";
+              }
+              { # DNS (UDP) 192.0.2.16 → 2001:db8:a::2
+                "protocol" = "UDP";
+                "ipv4 address" = "192.0.2.16#53";
+                "ipv6 address" = "2001:db8:a::2#53";
+              }
+            ];
+
+            pool4 = [
+              # Port ranges for dynamic translation
+              { protocol =  "TCP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
+              { protocol =  "UDP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
+              { protocol = "ICMP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
+
+              # Ports for static BIB entries
+              { protocol =  "TCP";  prefix = "192.0.2.16/32"; "port range" = "22"; }
+              { protocol =  "UDP";  prefix = "192.0.2.16/32"; "port range" = "53"; }
+            ];
+          };
         }
       '';
       description = lib.mdDoc ''
-        The configuration of a stateful NAT64 instance of Jool managed through
-        NixOS. See https://nicmx.github.io/Jool/en/config-atomic.html for the
-        available options.
+        Definitions of NAT64 instances of Jool.
+        See the
+        [documentation](https://nicmx.github.io/Jool/en/config-atomic.html) for
+        the available options. Also check out the
+        [tutorial](https://nicmx.github.io/Jool/en/run-nat64.html) for an
+        introduction to NAT64 and how to troubleshoot the setup.
+
+        The attribute name defines the name of the instance, with the main one
+        being `default`: this can be accessed from the command line without
+        specifying the name with `-i`.
 
         ::: {.note}
-        Existing or more instances created manually will not interfere with the
-        NixOS instance, provided the respective `pool4` addresses and port
-        ranges are not overlapping.
+        Instances created imperatively from the command line will not interfere
+        with the NixOS instances, provided the respective `pool4` addresses and
+        port ranges are not overlapping.
         :::
 
         ::: {.warning}
-        Changes to the NixOS instance performed via `jool instance nixos-nat64`
-        are applied correctly but will be lost after restarting
-        `jool-nat64.service`.
+        Changes to an instance performed via `jool -i <name>` are applied
+        correctly but will be lost after restarting the respective
+        `jool-nat64-<name>.service`.
         :::
       '';
     };
 
-    networking.jool.siit.enable = lib.mkEnableOption (lib.mdDoc "a SIIT instance of Jool.");
-    networking.jool.siit.config = lib.mkOption {
-      type = configFormat.type;
-      default = defaultSiit;
+    networking.jool.siit = lib.mkOption {
+      type = lib.types.attrsOf siitOptions;
+      default = { };
       example = lib.literalExpression ''
         {
-          # Maps any IPv4 address x.y.z.t to 2001:db8::x.y.z.t and v.v.
-          pool6 = "2001:db8::/96";
-
-          # Explicit address mappings
-          eamt = [
-            # 2001:db8:1:: ←→ 192.0.2.0
-            { "ipv6 prefix": "2001:db8:1::/128", "ipv4 prefix": "192.0.2.0" }
-            # 2001:db8:1::x ←→ 198.51.100.x
-            { "ipv6 prefix": "2001:db8:2::/120", "ipv4 prefix": "198.51.100.0/24" }
-          ]
+          default = {
+            # Maps any IPv4 address x.y.z.t to 2001:db8::x.y.z.t and v.v.
+            global.pool6 = "2001:db8::/96";
+
+            # Explicit address mappings
+            eamt = [
+              # 2001:db8:1:: ←→ 192.0.2.0
+              { "ipv6 prefix" = "2001:db8:1::/128"; "ipv4 prefix" = "192.0.2.0"; }
+              # 2001:db8:1::x ←→ 198.51.100.x
+              { "ipv6 prefix" = "2001:db8:2::/120"; "ipv4 prefix" = "198.51.100.0/24"; }
+            ];
+          };
         }
       '';
       description = lib.mdDoc ''
-        The configuration of a SIIT instance of Jool managed through
-        NixOS. See https://nicmx.github.io/Jool/en/config-atomic.html for the
-        available options.
+        Definitions of SIIT instances of Jool.
+        See the
+        [documentation](https://nicmx.github.io/Jool/en/config-atomic.html) for
+        the available options. Also check out the
+        [tutorial](https://nicmx.github.io/Jool/en/run-vanilla.html) for an
+        introduction to SIIT and how to troubleshoot the setup.
+
+        The attribute name defines the name of the instance, with the main one
+        being `default`: this can be accessed from the command line without
+        specifying the name with `-i`.
 
         ::: {.note}
-        Existing or more instances created manually will not interfere with the
-        NixOS instance, provided the respective `EAMT` address mappings are not
-        overlapping.
+        Instances created imperatively from the command line will not interfere
+        with the NixOS instances, provided the respective EAMT address
+        mappings are not overlapping.
         :::
 
         ::: {.warning}
-        Changes to the NixOS instance performed via `jool instance nixos-siit`
-        are applied correctly but will be lost after restarting
-        `jool-siit.service`.
+        Changes to an instance performed via `jool -i <name>` are applied
+        correctly but will be lost after restarting the respective
+        `jool-siit-<name>.service`.
         :::
       '';
     };
 
   };
 
-  ###### implementation
-
   config = lib.mkIf cfg.enable {
-    environment.systemPackages = [ jool-cli ];
+    # Install kernel module and cli tools
     boot.extraModulePackages = [ jool ];
+    environment.systemPackages = [ jool-cli ];
 
-    systemd.services.jool-nat64 = lib.mkIf cfg.nat64.enable {
-      description = "Jool, NAT64 setup";
-      documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
-      after = [ "network.target" ];
-      wantedBy = [ "multi-user.target" ];
-      reloadIfChanged = true;
-      serviceConfig = {
-        Type = "oneshot";
-        RemainAfterExit = true;
-        ExecStartPre = "${pkgs.kmod}/bin/modprobe jool";
-        ExecStart    = "${jool-cli}/bin/jool file handle ${nat64Conf}";
-        ExecStop     = "${jool-cli}/bin/jool -f ${nat64Conf} instance remove";
-      } // hardening;
-    };
-
-    systemd.services.jool-siit = lib.mkIf cfg.siit.enable {
-      description = "Jool, SIIT setup";
-      documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
-      after = [ "network.target" ];
-      wantedBy = [ "multi-user.target" ];
-      reloadIfChanged = true;
-      serviceConfig = {
-        Type = "oneshot";
-        RemainAfterExit = true;
-        ExecStartPre = "${pkgs.kmod}/bin/modprobe jool_siit";
-        ExecStart    = "${jool-cli}/bin/jool_siit file handle ${siitConf}";
-        ExecStop     = "${jool-cli}/bin/jool_siit -f ${siitConf} instance remove";
-      } // hardening;
-    };
-
-    system.checks = lib.singleton (pkgs.runCommand "jool-validated" {
-      nativeBuildInputs = [ pkgs.buildPackages.jool-cli ];
-      preferLocalBuild = true;
-    } ''
-      printf 'Validating Jool configuration... '
-      ${lib.optionalString cfg.siit.enable "jool_siit file check ${siitConf}"}
-      ${lib.optionalString cfg.nat64.enable "jool file check ${nat64Conf}"}
-      printf 'ok\n'
-      touch "$out"
-    '');
-
-    networking.jool.nat64.config = mkDefaultAttrs defaultNat64;
-    networking.jool.siit.config  = mkDefaultAttrs defaultSiit;
+    # Install services for each instance
+    systemd.services = lib.mkMerge
+      (lib.mapAttrsToList makeNat64Unit cfg.nat64 ++
+       lib.mapAttrsToList makeSiitUnit cfg.siit);
 
+    # Check the configuration of each instance
+    system.checks = lib.optional (cfg.nat64 != {} || cfg.siit != {})
+      (pkgs.runCommand "jool-validated"
+        {
+          nativeBuildInputs = with pkgs.buildPackages; [ jool-cli ];
+          preferLocalBuild = true;
+        }
+        (lib.concatStrings
+          (lib.mapAttrsToList checkNat64 cfg.nat64 ++
+           lib.mapAttrsToList checkSiit cfg.siit)));
   };
 
   meta.maintainers = with lib.maintainers; [ rnhmjoj ];
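With the new attrsOf interface, each attribute name becomes a Jool instance. A minimal sketch combining a default NAT64 instance with an additional SIIT instance (the prefixes are documentation addresses, purely illustrative):

    {
      networking.jool.enable = true;

      # NAT64 with the well-known prefix 64:ff9b::/96 and the netfilter framework
      networking.jool.nat64.default = { };

      # an extra SIIT instance, managed with `jool_siit -i stateless`
      networking.jool.siit.stateless = {
        framework = "iptables";
        global.pool6 = "2001:db8::/96";
      };
    }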
diff --git a/nixos/modules/services/networking/knot.nix b/nixos/modules/services/networking/knot.nix
index e97195d82919..d98c0ce25bf4 100644
--- a/nixos/modules/services/networking/knot.nix
+++ b/nixos/modules/services/networking/knot.nix
@@ -5,10 +5,110 @@ with lib;
 let
   cfg = config.services.knot;
 
-  configFile = pkgs.writeTextFile {
+  yamlConfig = let
+    result = assert secsCheck; nix2yaml cfg.settings;
+
+    secAllow = n: hasPrefix "mod-" n || elem n [
+      "module"
+      "server" "xdp" "control"
+      "log"
+      "statistics" "database"
+      "keystore" "key" "remote" "remotes" "acl" "submission" "policy"
+      "template"
+      "zone"
+      "include"
+    ];
+    secsCheck = let
+      secsBad = filter (n: !secAllow n) (attrNames cfg.settings);
+    in if secsBad == [] then true else throw
+      ("services.knot.settings contains unknown sections: " + toString secsBad);
+
+    nix2yaml = nix_def: concatStrings (
+        # We output the config section in the upstream-mandated order.
+        # Ordering is important due to forward-references not being allowed.
+        # See definition of conf_export and 'const yp_item_t conf_schema'
+        # upstream for reference.  Last updated for 3.3.
+        # When changing the set of sections, also update secAllow above.
+        [ (sec_list_fa "id" nix_def "module") ]
+        ++ map (sec_plain nix_def)
+          [ "server" "xdp" "control" ]
+        ++ [ (sec_list_fa "target" nix_def "log") ]
+        ++ map (sec_plain nix_def)
+          [  "statistics" "database" ]
+        ++ map (sec_list_fa "id" nix_def)
+          [ "keystore" "key" "remote" "remotes" "acl" "submission" "policy" ]
+
+        # Export module sections before the template section.
+        ++ map (sec_list_fa "id" nix_def) (filter (hasPrefix "mod-") (attrNames nix_def))
+
+        ++ [ (sec_list_fa "id" nix_def "template") ]
+        ++ [ (sec_list_fa "domain" nix_def "zone") ]
+        ++ [ (sec_plain nix_def "include") ]
+      );
+
+    # A plain section contains directly attributes (we don't really check that ATM).
+    sec_plain = nix_def: sec_name: if !hasAttr sec_name nix_def then "" else
+      n2y "" { ${sec_name} = nix_def.${sec_name}; };
+
+    # This section contains a list of attribute sets.  In each of the sets
+    # there's an attribute (`fa_name`, typically "id") that must exist and come first.
+    # Alternatively we support using attribute sets instead of lists; example diff:
+    # -template = [ { id = "default"; /* other attributes */ }   { id = "foo"; } ]
+    # +template = { default = {       /* those attributes */ };  foo = { };      }
+    sec_list_fa = fa_name: nix_def: sec_name: if !hasAttr sec_name nix_def then "" else
+      let
+        elem2yaml = fa_val: other_attrs:
+          "  - " + n2y "" { ${fa_name} = fa_val; }
+          + "    " + n2y "    " other_attrs
+          + "\n";
+        sec = nix_def.${sec_name};
+      in
+        sec_name + ":\n" +
+          (if isList sec
+            then flip concatMapStrings sec
+              (elem: elem2yaml elem.${fa_name} (removeAttrs elem [ fa_name ]))
+            else concatStrings (mapAttrsToList elem2yaml sec)
+          );
+
+    # This converter doesn't care about the ordering of attributes.
+    # TODO: it could probably be simplified even more, now that it's not
+    # to be used directly, but we might want some other tweaks, too.
+    n2y = indent: val:
+      if doRecurse val then concatStringsSep "\n${indent}"
+        (mapAttrsToList
+          # This is a bit wacky: a set directly under a set would start at a bad indent,
+          # so we start those on a new line, but not other types of attribute values.
+          (aname: aval: "${aname}:${if doRecurse aval then "\n${indent}  " else " "}"
+            + n2y (indent + "  ") aval)
+          val
+        )
+        + "\n"
+        else
+      /*
+      if isList val && stringLength indent < 4 then concatMapStrings
+        (elem: "\n${indent}- " + n2y (indent + "  ") elem)
+        val
+        else
+      */
+      if isList val /* and long indent */ then
+        "[ " + concatMapStringsSep ", " quoteString val + " ]" else
+      if isBool val then (if val then "on" else "off") else
+      quoteString val;
+
+    # We don't want paths like ./my-zone.txt be converted to plain strings.
+    quoteString = s: ''"${if builtins.typeOf s == "path" then s else toString s}"'';
+    # We don't want to walk the insides of derivation attributes.
+    doRecurse = val: isAttrs val && !isDerivation val;
+
+  in result;
+
+  configFile = if cfg.settingsFile != null then
+    assert cfg.settings == {} && cfg.keyFiles == [];
+    cfg.settingsFile
+  else pkgs.writeTextFile {
     name = "knot.conf";
-    text = (concatMapStringsSep "\n" (file: "include: ${file}") cfg.keyFiles) + "\n" +
-           cfg.extraConfig;
+    text = (concatMapStringsSep "\n" (file: "include: ${file}") cfg.keyFiles) + "\n" + yamlConfig;
+    # TODO: maybe we could do some checks even when private keys complicate this?
     checkPhase = lib.optionalString (cfg.keyFiles == []) ''
       ${cfg.package}/bin/knotc --config=$out conf-check
     '';
@@ -60,11 +160,21 @@ in {
         '';
       };
 
-      extraConfig = mkOption {
-        type = types.lines;
-        default = "";
+      settings = mkOption {
+        type = types.attrs;
+        default = {};
         description = lib.mdDoc ''
-          Extra lines to be added verbatim to knot.conf
+          Extra configuration as nix values.
+        '';
+      };
+
+      settingsFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          As an alternative to ``settings``, you can provide the whole configuration
+          directly in the almost-YAML format of Knot DNS.
+          You might want to use ``writeTextFile`` for this.
         '';
       };
 
@@ -78,6 +188,12 @@ in {
       };
     };
   };
+  imports = [
+    # Compatibility with NixOS 23.05.  Only partial, as it fails the assertion if used together with keyFiles.
+    (mkChangedOptionModule [ "services" "knot" "extraConfig" ] [ "services" "knot" "settingsFile" ]
+      (config: pkgs.writeText "knot.conf" config.services.knot.extraConfig)
+    )
+  ];
 
   config = mkIf config.services.knot.enable {
     users.groups.knot = {};
@@ -87,6 +203,8 @@ in {
       description = "Knot daemon user";
     };
 
+    environment.etc."knot/knot.conf".source = configFile; # just for user's convenience
+
     systemd.services.knot = {
       unitConfig.Documentation = "man:knotd(8) man:knot.conf(5) man:knotc(8) https://www.knot-dns.cz/docs/${cfg.package.version}/html/";
       description = cfg.package.meta.description;
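A sketch of the new `settings` option; the converter above emits sections in the upstream-mandated order and accepts either the list form or the attribute-set shorthand for identified sections such as `template` and `zone` (values are illustrative):

    {
      services.knot = {
        enable = true;
        settings = {
          server.listen = [ "0.0.0.0@53" "::@53" ];
          # identified section written in the attribute-set shorthand
          template.default = {
            storage = "/var/lib/knot/zones";
            semantic-checks = "on";
          };
          zone."example.com".file = "example.com.zone";
        };
      };
    }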
diff --git a/nixos/modules/services/networking/mtr-exporter.nix b/nixos/modules/services/networking/mtr-exporter.nix
index 43ebbbe96d05..af694c3e736b 100644
--- a/nixos/modules/services/networking/mtr-exporter.nix
+++ b/nixos/modules/services/networking/mtr-exporter.nix
@@ -2,63 +2,114 @@
 
 let
   inherit (lib)
-    maintainers types mkEnableOption mkOption mkIf
-    literalExpression escapeShellArg escapeShellArgs;
+    maintainers types literalExpression
+    escapeShellArg escapeShellArgs
+    mkEnableOption mkOption mkRemovedOptionModule mkIf mdDoc
+    optionalString concatMapStrings concatStringsSep;
+
   cfg = config.services.mtr-exporter;
+
+  jobsConfig = pkgs.writeText "mtr-exporter.conf" (concatMapStrings (job: ''
+    ${job.name} -- ${job.schedule} -- ${concatStringsSep " " job.flags} ${job.address}
+  '') cfg.jobs);
 in {
+  imports = [
+    (mkRemovedOptionModule [ "services" "mtr-exporter" "target" ] "Use services.mtr-exporter.jobs instead.")
+    (mkRemovedOptionModule [ "services" "mtr-exporter" "mtrFlags" ] "Use services.mtr-exporter.jobs.<job>.flags instead.")
+  ];
+
   options = {
     services = {
       mtr-exporter = {
-        enable = mkEnableOption (lib.mdDoc "a Prometheus exporter for MTR");
+        enable = mkEnableOption (mdDoc "a Prometheus exporter for MTR");
 
-        target = mkOption {
+        address = mkOption {
           type = types.str;
-          example = "example.org";
-          description = lib.mdDoc "Target to check using MTR.";
-        };
-
-        interval = mkOption {
-          type = types.int;
-          default = 60;
-          description = lib.mdDoc "Interval between MTR checks in seconds.";
+          default = "127.0.0.1";
+          description = lib.mdDoc "Listen address for MTR exporter.";
         };
 
         port = mkOption {
           type = types.port;
           default = 8080;
-          description = lib.mdDoc "Listen port for MTR exporter.";
+          description = mdDoc "Listen port for MTR exporter.";
         };
 
-        address = mkOption {
-          type = types.str;
-          default = "127.0.0.1";
-          description = lib.mdDoc "Listen address for MTR exporter.";
+        extraFlags = mkOption {
+          type = types.listOf types.str;
+          default = [];
+          example = ["-flag.deprecatedMetrics"];
+          description = mdDoc ''
+            Extra command line options to pass to MTR exporter.
+          '';
         };
 
-        mtrFlags = mkOption {
-          type = with types; listOf str;
-          default = [];
-          example = ["-G1"];
-          description = lib.mdDoc "Additional flags to pass to MTR.";
+        package = mkOption {
+          type = types.package;
+          default = pkgs.mtr-exporter;
+          defaultText = literalExpression "pkgs.mtr-exporter";
+          description = mdDoc "The MTR exporter package to use.";
+        };
+
+        mtrPackage = mkOption {
+          type = types.package;
+          default = pkgs.mtr;
+          defaultText = literalExpression "pkgs.mtr";
+          description = mdDoc "The MTR package to use.";
+        };
+
+        jobs = mkOption {
+          description = mdDoc "List of MTR jobs. Will be added to /etc/mtr-exporter.conf";
+          type = types.nonEmptyListOf (types.submodule {
+            options = {
+              name = mkOption {
+                type = types.str;
+                description = mdDoc "Name of ICMP pinging job.";
+              };
+
+              address = mkOption {
+                type = types.str;
+                example = "host.example.org:1234";
+                description = mdDoc "Target address for MTR client.";
+              };
+
+              schedule = mkOption {
+                type = types.str;
+                default = "@every 60s";
+                example = "@hourly";
+                description = mdDoc "Schedule of MTR checks. Also accepts Cron format.";
+              };
+
+              flags = mkOption {
+                type = with types; listOf str;
+                default = [];
+                example = ["-G1"];
+                description = mdDoc "Additional flags to pass to MTR.";
+              };
+            };
+          });
         };
       };
     };
   };
 
   config = mkIf cfg.enable {
+    environment.etc."mtr-exporter.conf" = {
+      source = jobsConfig;
+    };
+
     systemd.services.mtr-exporter = {
-      script = ''
-        exec ${pkgs.mtr-exporter}/bin/mtr-exporter \
-          -mtr ${pkgs.mtr}/bin/mtr \
-          -schedule '@every ${toString cfg.interval}s' \
-          -bind ${escapeShellArg cfg.address}:${toString cfg.port} \
-          -- \
-          ${escapeShellArgs (cfg.mtrFlags ++ [ cfg.target ])}
-      '';
       wantedBy = [ "multi-user.target" ];
       requires = [ "network.target" ];
       after = [ "network.target" ];
       serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/mtr-exporter \
+            -mtr '${cfg.mtrPackage}/bin/mtr' \
+            -bind ${escapeShellArg "${cfg.address}:${toString cfg.port}"} \
+            -jobs '${jobsConfig}' \
+            ${escapeShellArgs cfg.extraFlags}
+        '';
         Restart = "on-failure";
         # Hardening
         CapabilityBoundingSet = [ "" ];
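An illustrative configuration for the reworked options, replacing the removed `target`, `interval` and `mtrFlags` with a job list (host name and schedule are placeholders):

    {
      services.mtr-exporter = {
        enable = true;
        address = "127.0.0.1";
        port = 8080;
        jobs = [
          { name = "wan"; address = "example.org"; schedule = "@every 60s"; flags = [ "-G1" ]; }
        ];
      };
    }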
diff --git a/nixos/modules/services/networking/nat-nftables.nix b/nixos/modules/services/networking/nat-nftables.nix
index 483910a16658..4b2317ca2ffc 100644
--- a/nixos/modules/services/networking/nat-nftables.nix
+++ b/nixos/modules/services/networking/nat-nftables.nix
@@ -145,28 +145,28 @@ in
       }
     ];
 
-    networking.nftables.ruleset = ''
-      table ip nixos-nat {
-        ${mkTable {
+    networking.nftables.tables = {
+      "nixos-nat" = {
+        family = "ip";
+        content = mkTable {
           ipVer = "ip";
           inherit dest ipSet;
           forwardPorts = filter (x: !(isIPv6 x.destination)) cfg.forwardPorts;
           inherit (cfg) dmzHost;
-        }}
-      }
-
-      ${optionalString cfg.enableIPv6 ''
-        table ip6 nixos-nat {
-          ${mkTable {
-            ipVer = "ip6";
-            dest = destIPv6;
-            ipSet = ipv6Set;
-            forwardPorts = filter (x: isIPv6 x.destination) cfg.forwardPorts;
-            dmzHost = null;
-          }}
-        }
-      ''}
-    '';
+        };
+      };
+      "nixos-nat6" = mkIf cfg.enableIPv6 {
+        family = "ip6";
+        name = "nixos-nat";
+        content = mkTable {
+          ipVer = "ip6";
+          dest = destIPv6;
+          ipSet = ipv6Set;
+          forwardPorts = filter (x: isIPv6 x.destination) cfg.forwardPorts;
+          dmzHost = null;
+        };
+      };
+    };
 
     networking.firewall.extraForwardRules = optionalString config.networking.firewall.filterForward ''
       ${optionalString (ifaceSet != "") ''
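The NAT rules themselves are still driven by the existing `networking.nat.*` options; with nftables enabled they now land in the declarative tables shown above (the ip6 table is installed under the name nixos-nat). A sketch with placeholder interface names:

    {
      networking.nftables.enable = true;
      networking.nat = {
        enable = true;
        enableIPv6 = true;
        externalInterface = "eth0";
        internalInterfaces = [ "br0" ];
      };
    }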
diff --git a/nixos/modules/services/networking/networkmanager.nix b/nixos/modules/services/networking/networkmanager.nix
index e28f96f7a6d6..53c847ee3ca2 100644
--- a/nixos/modules/services/networking/networkmanager.nix
+++ b/nixos/modules/services/networking/networkmanager.nix
@@ -5,7 +5,7 @@ with lib;
 let
   cfg = config.networking.networkmanager;
 
-  delegateWireless = config.networking.wireless.enable == true && cfg.unmanaged != [];
+  delegateWireless = config.networking.wireless.enable == true && cfg.unmanaged != [ ];
 
   enableIwd = cfg.wifi.backend == "iwd";
 
@@ -30,17 +30,15 @@ let
   configFile = pkgs.writeText "NetworkManager.conf" (lib.concatStringsSep "\n" [
     (mkSection "main" {
       plugins = "keyfile";
-      dhcp = cfg.dhcp;
-      dns = cfg.dns;
+      inherit (cfg) dhcp dns;
       # If resolvconf is disabled that means that resolv.conf is managed by some other module.
       rc-manager =
         if config.networking.resolvconf.enable then "resolvconf"
         else "unmanaged";
-      firewall-backend = cfg.firewallBackend;
     })
     (mkSection "keyfile" {
       unmanaged-devices =
-        if cfg.unmanaged == [] then null
+        if cfg.unmanaged == [ ] then null
         else lib.concatStringsSep ";" cfg.unmanaged;
     })
     (mkSection "logging" {
@@ -103,7 +101,7 @@ let
   };
 
   macAddressOpt = mkOption {
-    type = types.either types.str (types.enum ["permanent" "preserve" "random" "stable"]);
+    type = types.either types.str (types.enum [ "permanent" "preserve" "random" "stable" ]);
     default = "preserve";
     example = "00:11:22:33:44:55";
     description = lib.mdDoc ''
@@ -126,7 +124,8 @@ let
     pkgs.wpa_supplicant
   ];
 
-in {
+in
+{
 
   meta = {
     maintainers = teams.freedesktop.members;
@@ -156,7 +155,7 @@ in {
           int
           str
         ]));
-        default = {};
+        default = { };
         description = lib.mdDoc ''
           Configuration for the [connection] section of NetworkManager.conf.
           Refer to
@@ -186,7 +185,7 @@ in {
 
       unmanaged = mkOption {
         type = types.listOf types.str;
-        default = [];
+        default = [ ];
         description = lib.mdDoc ''
           List of interfaces that will not be managed by NetworkManager.
           Interface name can be specified here, but if you need more fidelity,
@@ -232,15 +231,6 @@ in {
         '';
       };
 
-      firewallBackend = mkOption {
-        type = types.enum [ "iptables" "nftables" "none" ];
-        default = "iptables";
-        description = lib.mdDoc ''
-          Which firewall backend should be used for configuring masquerading with shared mode.
-          If set to none, NetworkManager doesn't manage the configuration at all.
-        '';
-      };
-
       logLevel = mkOption {
         type = types.enum [ "OFF" "ERR" "WARN" "INFO" "DEBUG" "TRACE" ];
         default = "WARN";
@@ -251,7 +241,7 @@ in {
 
       appendNameservers = mkOption {
         type = types.listOf types.str;
-        default = [];
+        default = [ ];
         description = lib.mdDoc ''
           A list of name servers that should be appended
           to the ones configured in NetworkManager or received by DHCP.
@@ -260,7 +250,7 @@ in {
 
       insertNameservers = mkOption {
         type = types.listOf types.str;
-        default = [];
+        default = [ ];
         description = lib.mdDoc ''
           A list of name servers that should be inserted before
           the ones configured in NetworkManager or received by DHCP.
@@ -336,23 +326,23 @@ in {
             };
           };
         });
-        default = [];
+        default = [ ];
         example = literalExpression ''
-        [ {
-              source = pkgs.writeText "upHook" '''
-
-                if [ "$2" != "up" ]; then
-                    logger "exit: event $2 != up"
-                    exit
-                fi
-
-                # coreutils and iproute are in PATH too
-                logger "Device $DEVICE_IFACE coming up"
+          [ {
+            source = pkgs.writeText "upHook" '''
+              if [ "$2" != "up" ]; then
+                logger "exit: event $2 != up"
+                exit
+              fi
+
+              # coreutils and iproute are in PATH too
+              logger "Device $DEVICE_IFACE coming up"
             ''';
             type = "basic";
-        } ]'';
+          } ]
+        '';
         description = lib.mdDoc ''
-          A list of scripts which will be executed in response to  network  events.
+          A list of scripts which will be executed in response to network events.
         '';
       };
 
@@ -369,14 +359,24 @@ in {
         '';
       };
 
-      enableFccUnlock = mkOption {
-        type = types.bool;
-        default = false;
+      fccUnlockScripts = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+            id = mkOption {
+              type = types.str;
+              description = lib.mdDoc "vid:pid of either the PCI or USB vendor and product ID";
+            };
+            path = mkOption {
+              type = types.path;
+              description = lib.mdDoc "Path to the unlock script";
+            };
+          };
+        });
+        default = [ ];
+        example = literalExpression ''[{ id = "03f0:4e1d"; path = "''${pkgs.modemmanager}/share/ModemManager/fcc-unlock.available.d/03f0:4e1d"; }]'';
         description = lib.mdDoc ''
-          Enable FCC unlock procedures. Since release 1.18.4, the ModemManager daemon no longer
-          automatically performs the FCC unlock procedure by default. See
-          [the docs](https://modemmanager.org/docs/modemmanager/fcc-unlock/)
-          for more details.
+          List of FCC unlock scripts to enable on the system, behaving as described in
+          https://modemmanager.org/docs/modemmanager/fcc-unlock/#integration-with-third-party-fcc-unlock-tools.
         '';
       };
     };
@@ -387,7 +387,14 @@ in {
       [ "networking" "networkmanager" "packages" ]
       [ "networking" "networkmanager" "plugins" ])
     (mkRenamedOptionModule [ "networking" "networkmanager" "useDnsmasq" ] [ "networking" "networkmanager" "dns" ])
-    (mkRemovedOptionModule ["networking" "networkmanager" "dynamicHosts"] ''
+    (mkRemovedOptionModule [ "networking" "networkmanager" "enableFccUnlock" ] ''
+      This option was removed, because using bundled FCC unlock scripts is risky,
+      might conflict with vendor-provided unlock scripts, and should
+      be a conscious decision on a per-device basis.
+      Instead it's recommended to use the
+      `networking.networkmanager.fccUnlockScripts` option.
+    '')
+    (mkRemovedOptionModule [ "networking" "networkmanager" "dynamicHosts" ] ''
       This option was removed because allowing (multiple) regular users to
       override host entries affecting the whole system opens up a huge attack
       vector. There seem to be very rare cases where this might be useful.
@@ -395,6 +402,9 @@ in {
       them via the DNS server in your network, or use environment.etc
       to add a file into /etc/NetworkManager/dnsmasq.d reconfiguring hostsdir.
     '')
+    (mkRemovedOptionModule [ "networking" "networkmanager" "firewallBackend" ] ''
+      This option was removed because NixOS now uses iptables-nftables-compat even when iptables is selected; NetworkManager therefore uses the nftables backend unconditionally.
+    '')
   ];
 
 
@@ -403,7 +413,8 @@ in {
   config = mkIf cfg.enable {
 
     assertions = [
-      { assertion = config.networking.wireless.enable == true -> cfg.unmanaged != [];
+      {
+        assertion = config.networking.wireless.enable == true -> cfg.unmanaged != [ ];
         message = ''
           You can not use networking.networkmanager with networking.wireless.
           Except if you mark some interfaces as <literal>unmanaged</literal> by NetworkManager.
@@ -414,25 +425,29 @@ in {
     hardware.wirelessRegulatoryDatabase = true;
 
     environment.etc = {
-        "NetworkManager/NetworkManager.conf".source = configFile;
-      }
-      // builtins.listToAttrs (map (pkg: nameValuePair "NetworkManager/${pkg.networkManagerPlugin}" {
+      "NetworkManager/NetworkManager.conf".source = configFile;
+    }
+    // builtins.listToAttrs (map
+      (pkg: nameValuePair "NetworkManager/${pkg.networkManagerPlugin}" {
         source = "${pkg}/lib/NetworkManager/${pkg.networkManagerPlugin}";
-      }) cfg.plugins)
-      // optionalAttrs cfg.enableFccUnlock
-         {
-           "ModemManager/fcc-unlock.d".source =
-             "${pkgs.modemmanager}/share/ModemManager/fcc-unlock.available.d/*";
-         }
-      // optionalAttrs (cfg.appendNameservers != [] || cfg.insertNameservers != [])
-         {
-           "NetworkManager/dispatcher.d/02overridedns".source = overrideNameserversScript;
-         }
-      // listToAttrs (lib.imap1 (i: s:
-         {
-            name = "NetworkManager/dispatcher.d/${dispatcherTypesSubdirMap.${s.type}}03userscript${lib.fixedWidthNumber 4 i}";
-            value = { mode = "0544"; inherit (s) source; };
-         }) cfg.dispatcherScripts);
+      })
+      cfg.plugins)
+    // builtins.listToAttrs (map
+      (e: nameValuePair "ModemManager/fcc-unlock.d/${e.id}" {
+        source = e.path;
+      })
+      cfg.fccUnlockScripts)
+    // optionalAttrs (cfg.appendNameservers != [ ] || cfg.insertNameservers != [ ])
+      {
+        "NetworkManager/dispatcher.d/02overridedns".source = overrideNameserversScript;
+      }
+    // listToAttrs (lib.imap1
+      (i: s:
+        {
+          name = "NetworkManager/dispatcher.d/${dispatcherTypesSubdirMap.${s.type}}03userscript${lib.fixedWidthNumber 4 i}";
+          value = { mode = "0544"; inherit (s) source; };
+        })
+      cfg.dispatcherScripts);
 
     environment.systemPackages = packages;
 
diff --git a/nixos/modules/services/networking/nftables.nix b/nixos/modules/services/networking/nftables.nix
index faff1dca89ba..a0afdb452752 100644
--- a/nixos/modules/services/networking/nftables.nix
+++ b/nixos/modules/services/networking/nftables.nix
@@ -2,6 +2,35 @@
 with lib;
 let
   cfg = config.networking.nftables;
+
+  tableSubmodule = { name, ... }: {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Enable this table.";
+      };
+
+      name = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Table name.";
+      };
+
+      content = mkOption {
+        type = types.lines;
+        description = lib.mdDoc "The table content.";
+      };
+
+      family = mkOption {
+        description = lib.mdDoc "Table family.";
+        type = types.enum [ "ip" "ip6" "inet" "arp" "bridge" "netdev" ];
+      };
+    };
+
+    config = {
+      name = mkDefault name;
+    };
+  };
 in
 {
   ###### interface
@@ -41,6 +70,26 @@ in
       '';
     };
 
+    networking.nftables.checkRulesetRedirects = mkOption {
+      type = types.addCheck (types.attrsOf types.path) (attrs: all types.path.check (attrNames attrs));
+      default = {
+        "/etc/hosts" = config.environment.etc.hosts.source;
+        "/etc/protocols" = config.environment.etc.protocols.source;
+        "/etc/services" = config.environment.etc.services.source;
+      };
+      defaultText = literalExpression ''
+        {
+          "/etc/hosts" = config.environment.etc.hosts.source;
+          "/etc/protocols" = config.environment.etc.protocols.source;
+          "/etc/services" = config.environment.etc.services.source;
+        }
+      '';
+      description = mdDoc ''
+        Set of paths that should be intercepted and rewritten while checking the ruleset
+        using `pkgs.buildPackages.libredirect`.
+      '';
+    };
+
     networking.nftables.preCheckRuleset = mkOption {
       type = types.lines;
       default = "";
@@ -54,6 +103,24 @@ in
       '';
     };
 
+    networking.nftables.flushRuleset = mkEnableOption (lib.mdDoc "Flush the entire ruleset on each reload.");
+
+    networking.nftables.extraDeletions = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        # this makes deleting a non-existing table a no-op instead of an error
+        table inet some-table;
+
+        delete table inet some-table;
+      '';
+      description =
+        lib.mdDoc ''
+          Extra deletion commands to be run on every firewall start, reload
+          and after stopping the firewall.
+        '';
+    };
+
     networking.nftables.ruleset = mkOption {
       type = types.lines;
       default = "";
@@ -103,7 +170,10 @@ in
         lib.mdDoc ''
           The ruleset to be used with nftables.  Should be in a format that
           can be loaded using "/bin/nft -f".  The ruleset is updated atomically.
-          This option conflicts with rulesetFile.
+          Note that if existing tables should be cleaned up first, one of the following is needed:
+          - networking.nftables.flushRuleset = true; (flushes the entire ruleset)
+          - networking.nftables.extraDeletions with matching delete statements
+          - or networking.nftables.tables, which cleans up its tables automatically
         '';
     };
     networking.nftables.rulesetFile = mkOption {
@@ -113,9 +183,64 @@ in
         lib.mdDoc ''
           The ruleset file to be used with nftables.  Should be in a format that
           can be loaded using "nft -f".  The ruleset is updated atomically.
-          This option conflicts with ruleset and nftables based firewall.
         '';
     };
+    networking.nftables.tables = mkOption {
+      type = types.attrsOf (types.submodule tableSubmodule);
+
+      default = {};
+
+      description = lib.mdDoc ''
+        Tables to be added to ruleset.
+        Tables will be added together with delete statements to clean up the table before every update.
+      '';
+
+      example = {
+        filter = {
+          family = "inet";
+          content = ''
+            # Check out https://wiki.nftables.org/ for better documentation.
+            # Table for both IPv4 and IPv6.
+            # Block all incoming connections traffic except SSH and "ping".
+            chain input {
+              type filter hook input priority 0;
+
+              # accept any localhost traffic
+              iifname lo accept
+
+              # accept traffic originated from us
+              ct state {established, related} accept
+
+              # ICMP
+              # routers may also want: mld-listener-query, nd-router-solicit
+              ip6 nexthdr icmpv6 icmpv6 type { destination-unreachable, packet-too-big, time-exceeded, parameter-problem, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert } accept
+              ip protocol icmp icmp type { destination-unreachable, router-advertisement, time-exceeded, parameter-problem } accept
+
+              # allow "ping"
+              ip6 nexthdr icmpv6 icmpv6 type echo-request accept
+              ip protocol icmp icmp type echo-request accept
+
+              # accept SSH connections (required for a server)
+              tcp dport 22 accept
+
+              # count and drop any other traffic
+              counter drop
+            }
+
+            # Allow all outgoing connections.
+            chain output {
+              type filter hook output priority 0;
+              accept
+            }
+
+            chain forward {
+              type filter hook forward priority 0;
+              accept
+            }
+          '';
+        };
+      };
+    };
   };
 
   ###### implementation
@@ -123,7 +248,8 @@ in
   config = mkIf cfg.enable {
     boot.blacklistedKernelModules = [ "ip_tables" ];
     environment.systemPackages = [ pkgs.nftables ];
-    networking.networkmanager.firewallBackend = mkDefault "nftables";
+    # versionOlder for backportability, remove afterwards
+    networking.nftables.flushRuleset = mkDefault (versionOlder config.system.stateVersion "23.11" || (cfg.rulesetFile != null || cfg.ruleset != ""));
     systemd.services.nftables = {
       description = "nftables firewall";
       before = [ "network-pre.target" ];
@@ -131,20 +257,51 @@ in
       wantedBy = [ "multi-user.target" ];
       reloadIfChanged = true;
       serviceConfig = let
+        enabledTables = filterAttrs (_: table: table.enable) cfg.tables;
+        deletionsScript = pkgs.writeScript "nftables-deletions" ''
+          #! ${pkgs.nftables}/bin/nft -f
+          ${if cfg.flushRuleset then "flush ruleset"
+            else concatStringsSep "\n" (mapAttrsToList (_: table: ''
+              table ${table.family} ${table.name}
+              delete table ${table.family} ${table.name}
+            '') enabledTables)}
+          ${cfg.extraDeletions}
+        '';
+        deletionsScriptVar = "/var/lib/nftables/deletions.nft";
+        ensureDeletions = pkgs.writeShellScript "nftables-ensure-deletions" ''
+          touch ${deletionsScriptVar}
+          chmod +x ${deletionsScriptVar}
+        '';
+        saveDeletionsScript = pkgs.writeShellScript "nftables-save-deletions" ''
+          cp ${deletionsScript} ${deletionsScriptVar}
+        '';
+        cleanupDeletionsScript = pkgs.writeShellScript "nftables-cleanup-deletions" ''
+          rm ${deletionsScriptVar}
+        '';
         rulesScript = pkgs.writeTextFile {
           name =  "nftables-rules";
           executable = true;
           text = ''
             #! ${pkgs.nftables}/bin/nft -f
-            flush ruleset
-            ${if cfg.rulesetFile != null then ''
+            # previous deletions, if any
+            include "${deletionsScriptVar}"
+            # current deletions
+            include "${deletionsScript}"
+            ${concatStringsSep "\n" (mapAttrsToList (_: table: ''
+              table ${table.family} ${table.name} {
+                ${table.content}
+              }
+            '') enabledTables)}
+            ${cfg.ruleset}
+            ${lib.optionalString (cfg.rulesetFile != null) ''
               include "${cfg.rulesetFile}"
-            '' else cfg.ruleset}
+            ''}
           '';
           checkPhase = lib.optionalString cfg.checkRuleset ''
             cp $out ruleset.conf
+            sed 's|include "${deletionsScriptVar}"||' -i ruleset.conf
             ${cfg.preCheckRuleset}
-            export NIX_REDIRECTS=/etc/protocols=${pkgs.buildPackages.iana-etc}/etc/protocols:/etc/services=${pkgs.buildPackages.iana-etc}/etc/services
+            export NIX_REDIRECTS=${escapeShellArg (concatStringsSep ":" (mapAttrsToList (n: v: "${n}=${v}") cfg.checkRulesetRedirects))}
             LD_PRELOAD="${pkgs.buildPackages.libredirect}/lib/libredirect.so ${pkgs.buildPackages.lklWithFirewall.lib}/lib/liblkl-hijack.so" \
               ${pkgs.buildPackages.nftables}/bin/nft --check --file ruleset.conf
           '';
@@ -152,9 +309,11 @@ in
       in {
         Type = "oneshot";
         RemainAfterExit = true;
-        ExecStart = rulesScript;
-        ExecReload = rulesScript;
-        ExecStop = "${pkgs.nftables}/bin/nft flush ruleset";
+        ExecStart = [ ensureDeletions rulesScript ];
+        ExecStartPost = saveDeletionsScript;
+        ExecReload = [ ensureDeletions rulesScript saveDeletionsScript ];
+        ExecStop = [ deletionsScriptVar cleanupDeletionsScript ];
+        StateDirectory = "nftables";
       };
     };
   };
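A hedged sketch of how the per-table mechanism introduced above might be used together with the new `flushRuleset` toggle. The option names (`tables`, `family`, `content`, `flushRuleset`) follow the module code in this hunk; the chain contents are just an example ruleset, and the attribute name `filter` is assumed to default the table's `name`.

```nix
{
  networking.nftables = {
    enable = true;
    # With per-table management, only the declared tables are deleted and
    # re-created on reload instead of flushing the entire ruleset.
    flushRuleset = false;
    tables.filter = {
      family = "inet";
      content = ''
        chain input {
          type filter hook input priority 0; policy drop;
          ct state established,related accept
          iifname lo accept
          tcp dport 22 accept
        }
      '';
    };
  };
}
```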
diff --git a/nixos/modules/services/networking/nncp.nix b/nixos/modules/services/networking/nncp.nix
new file mode 100644
index 000000000000..3cfe41995e76
--- /dev/null
+++ b/nixos/modules/services/networking/nncp.nix
@@ -0,0 +1,131 @@
+{ config, lib, pkgs, ... }:
+with lib;
+
+let
+  nncpCfgFile = "/run/nncp.hjson";
+  programCfg = config.programs.nncp;
+  callerCfg = config.services.nncp.caller;
+  daemonCfg = config.services.nncp.daemon;
+  settingsFormat = pkgs.formats.json { };
+  jsonCfgFile = settingsFormat.generate "nncp.json" programCfg.settings;
+  pkg = programCfg.package;
+in {
+  options = {
+
+    services.nncp = {
+      caller = {
+        enable = mkEnableOption ''
+          the cron-like NNCP TCP daemon caller.
+          The daemon will take configuration from
+          [](#opt-programs.nncp.settings)
+        '';
+        extraArgs = mkOption {
+          type = with types; listOf str;
+          description = "Extra command-line arguments to pass to caller.";
+          default = [ ];
+          example = [ "-autotoss" ];
+        };
+      };
+
+      daemon = {
+        enable = mkEnableOption ''
+          NNCP TCP synchronization daemon.
+          The daemon will take configuration from
+          [](#opt-programs.nncp.settings)
+        '';
+        socketActivation = {
+          enable = mkEnableOption ''
+            socket activation for nncp-daemon, running it on demand instead of persistently.
+          '';
+          listenStreams = mkOption {
+            type = with types; listOf str;
+            description = lib.mdDoc ''
+              TCP sockets to bind to.
+              See [](#opt-systemd.sockets._name_.listenStreams).
+            '';
+            default = [ "5400" ];
+          };
+        };
+        extraArgs = mkOption {
+          type = with types; listOf str;
+          description = "Extra command-line arguments to pass to daemon.";
+          default = [ ];
+          example = [ "-autotoss" ];
+        };
+      };
+
+    };
+  };
+
+  config = mkIf (programCfg.enable or callerCfg.enable or daemonCfg.enable) {
+
+    assertions = [{
+      assertion = with builtins;
+        let
+          callerConfigured =
+            let neigh = config.programs.nncp.settings.neigh or { };
+            in lib.lists.any (x: hasAttr "calls" x && x.calls != [ ])
+            (attrValues neigh);
+        in !callerCfg.enable || callerConfigured;
+      message = "NNCP caller enabled but call configuration is missing";
+    }];
+
+    systemd.services."nncp-caller" = {
+      inherit (callerCfg) enable;
+      description = "Croned NNCP TCP daemon caller.";
+      documentation = [ "http://www.nncpgo.org/nncp_002dcaller.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkg}/bin/nncp-caller -noprogress -cfg "${nncpCfgFile}" ${
+            lib.strings.escapeShellArgs callerCfg.extraArgs
+          }'';
+        Group = "uucp";
+        UMask = "0002";
+      };
+    };
+
+    systemd.services."nncp-daemon" = mkIf daemonCfg.enable {
+      enable = !daemonCfg.socketActivation.enable;
+      description = "NNCP TCP syncronization daemon.";
+      documentation = [ "http://www.nncpgo.org/nncp_002ddaemon.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkg}/bin/nncp-daemon -noprogress -cfg "${nncpCfgFile}" ${
+            lib.strings.escapeShellArgs daemonCfg.extraArgs
+          }'';
+        Restart = "on-failure";
+        Group = "uucp";
+        UMask = "0002";
+      };
+    };
+
+    systemd.services."nncp-daemon@" = mkIf daemonCfg.socketActivation.enable {
+      description = "NNCP TCP syncronization daemon.";
+      documentation = [ "http://www.nncpgo.org/nncp_002ddaemon.html" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkg}/bin/nncp-daemon -noprogress -ucspi -cfg "${nncpCfgFile}" ${
+            lib.strings.escapeShellArgs daemonCfg.extraArgs
+          }'';
+        Group = "uucp";
+        UMask = "0002";
+        StandardInput = "socket";
+        StandardOutput = "inherit";
+        StandardError = "journal";
+      };
+    };
+
+    systemd.sockets.nncp-daemon = mkIf daemonCfg.socketActivation.enable {
+      inherit (daemonCfg.socketActivation) listenStreams;
+      description = "socket for NNCP TCP syncronization.";
+      conflicts = [ "nncp-daemon.service" ];
+      wantedBy = [ "sockets.target" ];
+      socketConfig.Accept = true;
+    };
+  };
+}
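A hedged sketch of how these options might be combined. It assumes the companion `programs.nncp` module (which the option descriptions above reference) provides `enable` and `settings`, and that the neighbour's `calls` entries follow NNCP's hjson configuration format; keys and addresses are placeholders.

```nix
{
  # Assumed to exist: the programs.nncp module this service reads its
  # configuration from (see the option descriptions above).
  programs.nncp = {
    enable = true;
    settings.neigh.relay = {
      # Placeholder neighbour; a real deployment fills in keys and addresses.
      addrs.main = "relay.example.org:5400";
      calls = [ { cron = "*/30 * * * *"; } ];
    };
  };

  # Socket-activated daemon on the default listen stream (port 5400).
  services.nncp.daemon = {
    enable = true;
    socketActivation.enable = true;
  };

  # Periodic caller; the assertion above requires at least one neighbour
  # with a non-empty `calls` list.
  services.nncp.caller.enable = true;
}
```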
diff --git a/nixos/modules/services/networking/privoxy.nix b/nixos/modules/services/networking/privoxy.nix
index 78d02aaa1125..619490a4c020 100644
--- a/nixos/modules/services/networking/privoxy.nix
+++ b/nixos/modules/services/networking/privoxy.nix
@@ -12,7 +12,7 @@ let
     else "${name} ${toString val}\n";
 
   configType = with types;
-    let atom = oneOf [ int bool string path ];
+    let atom = oneOf [ int bool str path ];
     in attrsOf (either atom (listOf atom))
     // { description = ''
           privoxy configuration type. The format consists of an attribute
diff --git a/nixos/modules/services/networking/ssh/sshd.nix b/nixos/modules/services/networking/ssh/sshd.nix
index e75239e059d3..bf2f5230c738 100644
--- a/nixos/modules/services/networking/ssh/sshd.nix
+++ b/nixos/modules/services/networking/ssh/sshd.nix
@@ -27,14 +27,11 @@ let
       mkValueString = mkValueStringSshd;
     } " ";});
 
-  configFile = settingsFormat.generate "config" cfg.settings;
-  sshconf = pkgs.runCommand "sshd.conf-validated" { nativeBuildInputs = [ validationPackage ]; } ''
+  configFile = settingsFormat.generate "sshd.conf-settings" cfg.settings;
+  sshconf = pkgs.runCommand "sshd.conf-final" { } ''
     cat ${configFile} - >$out <<EOL
     ${cfg.extraConfig}
     EOL
-
-    ssh-keygen -q -f mock-hostkey -N ""
-    sshd -t -f $out -h mock-hostkey
   '';
 
   cfg  = config.services.openssh;
@@ -577,6 +574,21 @@ in
         '')}
       '';
 
+    system.checks = [
+      (pkgs.runCommand "check-sshd-config"
+        {
+          nativeBuildInputs = [ validationPackage ];
+        } ''
+        ${concatMapStringsSep "\n"
+          (lport: "sshd -G -T -C lport=${toString lport} -f ${sshconf} > /dev/null")
+          cfg.ports}
+        ${concatMapStringsSep "\n"
+          (la: "sshd -G -T -C laddr=${la.addr},lport=${toString la.port} -f ${sshconf} > /dev/null")
+          cfg.listenAddresses}
+        touch $out
+      '')
+    ];
+
     assertions = [{ assertion = if cfg.settings.X11Forwarding then cfgc.setXAuthLocation else true;
                     message = "cannot enable X11 forwarding without setting xauth location";}
                   (let
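For context, the `system.checks` entry added above runs `sshd -G -T` once per configured port and once per listen address, so a configuration like the hedged sketch below is validated at build time instead of when the unit starts. The option names are the standard `services.openssh` ones; the values are placeholders.

```nix
{
  services.openssh = {
    enable = true;
    # Each port and each listen address results in one sshd -G -T invocation
    # inside the new check derivation.
    ports = [ 22 2222 ];
    listenAddresses = [
      { addr = "0.0.0.0"; port = 22; }
    ];
    settings.PasswordAuthentication = false;
  };
}
```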
diff --git a/nixos/modules/services/networking/tedicross.nix b/nixos/modules/services/networking/tedicross.nix
deleted file mode 100644
index cee7e11f4fb1..000000000000
--- a/nixos/modules/services/networking/tedicross.nix
+++ /dev/null
@@ -1,100 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
-  dataDir = "/var/lib/tedicross";
-  cfg = config.services.tedicross;
-  configJSON = pkgs.writeText "tedicross-settings.json" (builtins.toJSON cfg.config);
-  configYAML = pkgs.runCommand "tedicross-settings.yaml" { preferLocalBuild = true; } ''
-    ${pkgs.remarshal}/bin/json2yaml -i ${configJSON} -o $out
-  '';
-
-in {
-  options = {
-    services.tedicross = {
-      enable = mkEnableOption (lib.mdDoc "the TediCross Telegram-Discord bridge service");
-
-      config = mkOption {
-        type = types.attrs;
-        # from https://github.com/TediCross/TediCross/blob/master/example.settings.yaml
-        example = literalExpression ''
-          {
-            telegram = {
-              useFirstNameInsteadOfUsername = false;
-              colonAfterSenderName = false;
-              skipOldMessages = true;
-              sendEmojiWithStickers = true;
-            };
-            discord = {
-              useNickname = false;
-              skipOldMessages = true;
-              displayTelegramReplies = "embed";
-              replyLength = 100;
-            };
-            bridges = [
-              {
-                name = "Default bridge";
-                direction = "both";
-                telegram = {
-                  chatId = -123456789;
-                  relayJoinMessages = true;
-                  relayLeaveMessages = true;
-                  sendUsernames = true;
-                  ignoreCommands = true;
-                };
-                discord = {
-                  serverId = "DISCORD_SERVER_ID";
-                  channelId = "DISCORD_CHANNEL_ID";
-                  relayJoinMessages = true;
-                  relayLeaveMessages = true;
-                  sendUsernames = true;
-                  crossDeleteOnTelegram = true;
-                };
-              }
-            ];
-
-            debug = false;
-          }
-        '';
-        description = lib.mdDoc ''
-          {file}`settings.yaml` configuration as a Nix attribute set.
-          Secret tokens should be specified using {option}`environmentFile`
-          instead of this world-readable file.
-        '';
-      };
-
-      environmentFile = mkOption {
-        type = types.nullOr types.path;
-        default = null;
-        description = lib.mdDoc ''
-          File containing environment variables to be passed to the TediCross service,
-          in which secret tokens can be specified securely using the
-          `TELEGRAM_BOT_TOKEN` and `DISCORD_BOT_TOKEN`
-          keys.
-        '';
-      };
-    };
-  };
-
-  config = mkIf cfg.enable {
-    # from https://github.com/TediCross/TediCross/blob/master/guides/autostart/Linux.md
-    systemd.services.tedicross = {
-      description = "TediCross Telegram-Discord bridge service";
-      wantedBy = [ "multi-user.target" ];
-      wants = [ "network-online.target" ];
-      after = [ "network-online.target" ];
-      serviceConfig = {
-        Type = "simple";
-        ExecStart = "${pkgs.nodePackages.tedicross}/bin/tedicross --config='${configYAML}' --data-dir='${dataDir}'";
-        Restart = "always";
-        DynamicUser = true;
-        StateDirectory = baseNameOf dataDir;
-        EnvironmentFile = cfg.environmentFile;
-      };
-    };
-  };
-
-  meta.maintainers = with maintainers; [ pacien ];
-}
-
diff --git a/nixos/modules/services/networking/trust-dns.nix b/nixos/modules/services/networking/trust-dns.nix
index a3b4d12479b4..4196d124a2ab 100644
--- a/nixos/modules/services/networking/trust-dns.nix
+++ b/nixos/modules/services/networking/trust-dns.nix
@@ -1,5 +1,4 @@
 { config, lib, pkgs, ... }:
-
 let
   cfg = config.services.trust-dns;
   toml = pkgs.formats.toml { };
@@ -55,7 +54,7 @@ in
         defaultText = "pkgs.trust-dns";
         description = mdDoc ''
           Trust-dns package to use.
-          Only `bin/named` need be provided: the other trust-dns utilities (client and resolver) are not needed.
+          Only `bin/trust-dns` need be provided: the other trust-dns utilities (client and resolver) are not needed.
         '';
       };
       quiet = mkOption {
@@ -136,7 +135,7 @@ in
           flags =  (lib.optional cfg.debug "--debug") ++ (lib.optional cfg.quiet "--quiet");
           flagsStr = builtins.concatStringsSep " " flags;
         in ''
-          ${cfg.package}/bin/named --config ${configFile} ${flagsStr}
+          ${cfg.package}/bin/trust-dns --config ${configFile} ${flagsStr}
         '';
         Type = "simple";
         Restart = "on-failure";
diff --git a/nixos/modules/services/networking/websockify.nix b/nixos/modules/services/networking/websockify.nix
index 45a3487bd337..27ad8953d3fa 100644
--- a/nixos/modules/services/networking/websockify.nix
+++ b/nixos/modules/services/networking/websockify.nix
@@ -38,7 +38,7 @@ let cfg = config.services.networking.websockify; in {
       description = "Service to forward websocket connections to TCP connections (from port:to port %I)";
       script = ''
         IFS=':' read -a array <<< "$1"
-        ${pkgs.pythonPackages.websockify}/bin/websockify --ssl-only \
+        ${pkgs.python3Packages.websockify}/bin/websockify --ssl-only \
           --cert=${cfg.sslCert} --key=${cfg.sslKey} 0.0.0.0:''${array[0]} 0.0.0.0:''${array[1]}
       '';
       scriptArgs = "%i";
diff --git a/nixos/modules/services/networking/wg-quick.nix b/nixos/modules/services/networking/wg-quick.nix
index 34210580f538..68e0e06d0469 100644
--- a/nixos/modules/services/networking/wg-quick.nix
+++ b/nixos/modules/services/networking/wg-quick.nix
@@ -17,6 +17,8 @@ let
         type = with types; nullOr str;
         description = lib.mdDoc ''
           wg-quick .conf file, describing the interface.
+          Using this option can be a useful means of configuring WireGuard if
+          one has an existing .conf file.
           This overrides any other interface configuration options.
           See wg-quick manpage for more details.
         '';
diff --git a/nixos/modules/services/networking/wstunnel.nix b/nixos/modules/services/networking/wstunnel.nix
index 067d5df48725..3c3ecc3e04d7 100644
--- a/nixos/modules/services/networking/wstunnel.nix
+++ b/nixos/modules/services/networking/wstunnel.nix
@@ -86,12 +86,12 @@ let
         description = mdDoc "Address and port to listen on. Setting the port to a value below 1024 will also give the process the required `CAP_NET_BIND_SERVICE` capability.";
         type = types.submodule hostPortSubmodule;
         default = {
-          address = "0.0.0.0";
+          host = "0.0.0.0";
           port = if config.enableHTTPS then 443 else 80;
         };
         defaultText = literalExpression ''
           {
-            address = "0.0.0.0";
+            host = "0.0.0.0";
             port = if enableHTTPS then 443 else 80;
           }
         '';
diff --git a/nixos/modules/services/search/typesense.nix b/nixos/modules/services/search/typesense.nix
index 856c3cad22df..c158d04fea23 100644
--- a/nixos/modules/services/search/typesense.nix
+++ b/nixos/modules/services/search/typesense.nix
@@ -83,12 +83,12 @@ in {
         Group = "typesense";
 
         StateDirectory = "typesense";
-        StateDirectoryMode = "0700";
+        StateDirectoryMode = "0750";
 
         # Hardening
         CapabilityBoundingSet = "";
         LockPersonality = true;
-        MemoryDenyWriteExecute = true;
+        # MemoryDenyWriteExecute = true; # has to stay disabled since typesense 0.25.1
         NoNewPrivileges = true;
         PrivateUsers = true;
         PrivateTmp = true;
diff --git a/nixos/modules/services/security/kanidm.nix b/nixos/modules/services/security/kanidm.nix
index d8a99dee59f4..6f4d1dc382ab 100644
--- a/nixos/modules/services/security/kanidm.nix
+++ b/nixos/modules/services/security/kanidm.nix
@@ -137,7 +137,7 @@ in
       default = { };
       description = lib.mdDoc ''
         Settings for Kanidm, see
-        [the documentation](https://github.com/kanidm/kanidm/blob/master/kanidm_book/src/server_configuration.md)
+        [the documentation](https://kanidm.github.io/kanidm/stable/server_configuration.html)
         and [example configuration](https://github.com/kanidm/kanidm/blob/master/examples/server.toml)
         for possible values.
       '';
@@ -155,7 +155,7 @@ in
       };
       description = lib.mdDoc ''
         Configure Kanidm clients, needed for the PAM daemon. See
-        [the documentation](https://github.com/kanidm/kanidm/blob/master/kanidm_book/src/client_tools.md#kanidm-configuration)
+        [the documentation](https://kanidm.github.io/kanidm/stable/client_tools.html#kanidm-configuration)
         and [example configuration](https://github.com/kanidm/kanidm/blob/master/examples/config)
         for possible values.
       '';
@@ -173,7 +173,7 @@ in
       };
       description = lib.mdDoc ''
         Configure Kanidm unix daemon.
-        See [the documentation](https://github.com/kanidm/kanidm/blob/master/kanidm_book/src/pam_and_nsswitch.md#the-unix-daemon)
+        See [the documentation](https://kanidm.github.io/kanidm/stable/integrations/pam_and_nsswitch.html#the-unix-daemon)
         and [example configuration](https://github.com/kanidm/kanidm/blob/master/examples/unixd)
         for possible values.
       '';
diff --git a/nixos/modules/services/security/vaultwarden/default.nix b/nixos/modules/services/security/vaultwarden/default.nix
index d22e6b5b40cd..0517615a4c6a 100644
--- a/nixos/modules/services/security/vaultwarden/default.nix
+++ b/nixos/modules/services/security/vaultwarden/default.nix
@@ -60,10 +60,8 @@ in {
     config = mkOption {
       type = attrsOf (nullOr (oneOf [ bool int str ]));
       default = {
-        config = {
-          ROCKET_ADDRESS = "::1"; # default to localhost
-          ROCKET_PORT = 8222;
-        };
+        ROCKET_ADDRESS = "::1"; # default to localhost
+        ROCKET_PORT = 8222;
       };
       example = literalExpression ''
         {
diff --git a/nixos/modules/services/web-apps/calibre-web.nix b/nixos/modules/services/web-apps/calibre-web.nix
index 143decfc0917..80567db10c97 100644
--- a/nixos/modules/services/web-apps/calibre-web.nix
+++ b/nixos/modules/services/web-apps/calibre-web.nix
@@ -10,6 +10,8 @@ in
     services.calibre-web = {
       enable = mkEnableOption (lib.mdDoc "Calibre-Web");
 
+      package = lib.mkPackageOption pkgs "calibre-web" { };
+
       listen = {
         ip = mkOption {
           type = types.str;
@@ -73,6 +75,8 @@ in
           '';
         };
 
+        enableKepubify = mkEnableOption (lib.mdDoc "kepub conversion support");
+
         enableBookUploading = mkOption {
           type = types.bool;
           default = false;
@@ -106,7 +110,7 @@ in
     systemd.services.calibre-web = let
       appDb = "/var/lib/${cfg.dataDir}/app.db";
       gdriveDb = "/var/lib/${cfg.dataDir}/gdrive.db";
-      calibreWebCmd = "${pkgs.calibre-web}/bin/calibre-web -p ${appDb} -g ${gdriveDb}";
+      calibreWebCmd = "${cfg.package}/bin/calibre-web -p ${appDb} -g ${gdriveDb}";
 
       settings = concatStringsSep ", " (
         [
@@ -117,6 +121,7 @@ in
         ]
         ++ optional (cfg.options.calibreLibrary != null) "config_calibre_dir = '${cfg.options.calibreLibrary}'"
         ++ optional cfg.options.enableBookConversion "config_converterpath = '${pkgs.calibre}/bin/ebook-convert'"
+        ++ optional cfg.options.enableKepubify "config_kepubifypath = '${pkgs.kepubify}/bin/kepubify'"
       );
     in
       {
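A hedged sketch exercising the two additions above (the overridable package and kepub conversion); the other option names are taken from the surrounding module, and the values are placeholders.

```nix
{ pkgs, ... }:
{
  services.calibre-web = {
    enable = true;
    # New in this change: the package can be overridden...
    package = pkgs.calibre-web;
    options = {
      # ...and kepubify-based conversion can be enabled alongside the
      # existing ebook-convert support.
      enableKepubify = true;
      enableBookConversion = true;
    };
  };
}
```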
diff --git a/nixos/modules/services/web-apps/cloudlog.nix b/nixos/modules/services/web-apps/cloudlog.nix
index 9261de8d4354..da2cf93d7f1c 100644
--- a/nixos/modules/services/web-apps/cloudlog.nix
+++ b/nixos/modules/services/web-apps/cloudlog.nix
@@ -308,8 +308,6 @@ in
       pools.cloudlog = {
         inherit (cfg) user;
         group = config.services.nginx.group;
-        # cloudlog is currently broken on php 8.2
-        phpPackage = pkgs.php81;
         settings =  {
           "listen.owner" = config.services.nginx.user;
           "listen.group" = config.services.nginx.group;
diff --git a/nixos/modules/services/web-apps/galene.nix b/nixos/modules/services/web-apps/galene.nix
index 747b85f94c65..81fed8a0b99a 100644
--- a/nixos/modules/services/web-apps/galene.nix
+++ b/nixos/modules/services/web-apps/galene.nix
@@ -186,7 +186,7 @@ in
           ProtectSystem = "strict";
           ReadWritePaths = cfg.recordingsDir;
           RemoveIPC = true;
-          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_NETLINK" ];
           RestrictNamespaces = true;
           RestrictRealtime = true;
           RestrictSUIDSGID = true;
diff --git a/nixos/modules/services/web-apps/grocy.nix b/nixos/modules/services/web-apps/grocy.nix
index b0d3c44cea81..4d1084e295ff 100644
--- a/nixos/modules/services/web-apps/grocy.nix
+++ b/nixos/modules/services/web-apps/grocy.nix
@@ -115,9 +115,9 @@ in {
       user = "grocy";
       group = "nginx";
 
-      # PHP 8.1 is the only version which is supported/tested by upstream:
-      # https://github.com/grocy/grocy/blob/v4.0.0/README.md#platform-support
-      phpPackage = pkgs.php81;
+      # PHP 8.1 and 8.2 are the only versions which are supported/tested by upstream:
+      # https://github.com/grocy/grocy/blob/v4.0.2/README.md#platform-support
+      phpPackage = pkgs.php82;
 
       inherit (cfg.phpfpm) settings;
 
@@ -130,6 +130,16 @@ in {
       };
     };
 
+    # After an update of grocy, the viewcache needs to be deleted; otherwise grocy will not work.
+    # https://github.com/grocy/grocy#how-to-update
+    systemd.services.grocy-setup = {
+      wantedBy = [ "multi-user.target" ];
+      before = [ "phpfpm-grocy.service" ];
+      script = ''
+        rm -rf ${cfg.dataDir}/viewcache/*
+      '';
+    };
+
     services.nginx = {
       enable = true;
       virtualHosts."${cfg.hostName}" = mkMerge [
diff --git a/nixos/modules/services/web-apps/healthchecks.nix b/nixos/modules/services/web-apps/healthchecks.nix
index b3fdb681e2f3..b92525075541 100644
--- a/nixos/modules/services/web-apps/healthchecks.nix
+++ b/nixos/modules/services/web-apps/healthchecks.nix
@@ -1,16 +1,16 @@
-{ config, lib, pkgs, buildEnv, ... }:
+{ config, lib, options, pkgs, buildEnv, ... }:
 
 with lib;
 
 let
   defaultUser = "healthchecks";
   cfg = config.services.healthchecks;
+  opt = options.services.healthchecks;
   pkg = cfg.package;
   boolToPython = b: if b then "True" else "False";
   environment = {
     PYTHONPATH = pkg.pythonPath;
     STATIC_ROOT = cfg.dataDir + "/static";
-    DB_NAME = "${cfg.dataDir}/healthchecks.sqlite";
   } // cfg.settings;
 
   environmentFile = pkgs.writeText "healthchecks-environment" (lib.generators.toKeyValue { } environment);
@@ -98,17 +98,24 @@ in
       description = lib.mdDoc ''
         Environment variables which are read by healthchecks `(local)_settings.py`.
 
-        Settings which are explicitly covered in options bewlow, are type-checked and/or transformed
+        Settings which are explicitly covered in the options below are type-checked and/or transformed
+        before being added to the environment; everything else is passed as a string.
 
         See <https://healthchecks.io/docs/self_hosted_configuration/>
         for a full documentation of settings.
 
-        We add two variables to this list inside the packages `local_settings.py.`
-        - STATIC_ROOT to set a state directory for dynamically generated static files.
-        - SECRET_KEY_FILE to read SECRET_KEY from a file at runtime and keep it out of /nix/store.
+        We add additional variables to this list inside the package's `local_settings.py`:
+        - `STATIC_ROOT` to set a state directory for dynamically generated static files.
+        - `SECRET_KEY_FILE` to read `SECRET_KEY` from a file at runtime and keep it out of
+          /nix/store.
+        - `_FILE` variants for several values that hold sensitive information in
+          [Healthchecks configuration](https://healthchecks.io/docs/self_hosted_configuration/) so
+          that they also can be read from a file and kept out of /nix/store. To see which values
+          have support for a `_FILE` variant, run:
+          - `nix-instantiate --eval --expr '(import <nixpkgs> {}).healthchecks.secrets'`
+          - or `nix eval 'nixpkgs#healthchecks.secrets'` if flake support has been enabled.
       '';
-      type = types.submodule {
+      type = types.submodule (settings: {
         freeformType = types.attrsOf types.str;
         options = {
           ALLOWED_HOSTS = lib.mkOption {
@@ -143,8 +150,28 @@ in
             '';
             apply = boolToPython;
           };
+
+          DB = mkOption {
+            type = types.enum [ "sqlite" "postgres" "mysql" ];
+            default = "sqlite";
+            description = lib.mdDoc "Database engine to use.";
+          };
+
+          DB_NAME = mkOption {
+            type = types.str;
+            default =
+              if settings.config.DB == "sqlite"
+              then "${cfg.dataDir}/healthchecks.sqlite"
+              else "hc";
+            defaultText = lib.literalExpression ''
+              if config.${settings.options.DB} == "sqlite"
+              then "''${config.${opt.dataDir}}/healthchecks.sqlite"
+              else "hc"
+            '';
+            description = lib.mdDoc "Database name.";
+          };
         };
-      };
+      });
     };
   };
 
@@ -168,7 +195,7 @@ in
           StateDirectoryMode = mkIf (cfg.dataDir == "/var/lib/healthchecks") "0750";
         };
       in
-        {
+      {
         healthchecks-migration = {
           description = "Healthchecks migrations";
           wantedBy = [ "healthchecks.target" ];
diff --git a/nixos/modules/services/web-apps/honk.md b/nixos/modules/services/web-apps/honk.md
new file mode 100644
index 000000000000..f34085f7dc52
--- /dev/null
+++ b/nixos/modules/services/web-apps/honk.md
@@ -0,0 +1,23 @@
+# Honk {#module-services-honk}
+
+With Honk on NixOS you can quickly configure a complete ActivityPub server with
+minimal setup and support costs.
+
+## Basic usage {#module-services-honk-basic-usage}
+
+A minimal configuration looks like this:
+
+```nix
+{
+  services.honk = {
+    enable = true;
+    host = "0.0.0.0";
+    port = 8080;
+    username = "username";
+    passwordFile = "/etc/honk/password.txt";
+    servername = "honk.example.com";
+  };
+
+  networking.firewall.allowedTCPPorts = [ 8080 ];
+}
+```
diff --git a/nixos/modules/services/web-apps/honk.nix b/nixos/modules/services/web-apps/honk.nix
new file mode 100644
index 000000000000..e8718774575b
--- /dev/null
+++ b/nixos/modules/services/web-apps/honk.nix
@@ -0,0 +1,153 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+let
+  cfg = config.services.honk;
+
+  honk-initdb-script = cfg: pkgs.writeShellApplication {
+    name = "honk-initdb-script";
+
+    runtimeInputs = with pkgs; [ coreutils ];
+
+    text = ''
+      PW=$(cat "$CREDENTIALS_DIRECTORY/honk_passwordFile")
+
+      echo -e "${cfg.username}\n''$PW\n${cfg.host}:${toString cfg.port}\n${cfg.servername}" | ${lib.getExe cfg.package} -datadir "$STATE_DIRECTORY" init
+    '';
+  };
+in
+{
+  options = {
+    services.honk = {
+      enable = lib.mkEnableOption (lib.mdDoc "the Honk server");
+      package = lib.mkPackageOptionMD pkgs "honk" { };
+
+      host = lib.mkOption {
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          The host name or IP address the server should listen on.
+        '';
+        type = lib.types.str;
+      };
+
+      port = lib.mkOption {
+        default = 8080;
+        description = lib.mdDoc ''
+          The port the server should listen on.
+        '';
+        type = lib.types.port;
+      };
+
+      username = lib.mkOption {
+        description = lib.mdDoc ''
+          The admin account username.
+        '';
+        type = lib.types.str;
+      };
+
+      passwordFile = lib.mkOption {
+        description = lib.mdDoc ''
+          File containing the password for the admin account.
+          NOTE: Pass this as a string, not a literal path, so the password file is not copied into the world-readable Nix store.
+        '';
+        type = lib.types.path;
+      };
+
+      servername = lib.mkOption {
+        description = lib.mdDoc ''
+          The server name.
+        '';
+        type = lib.types.str;
+      };
+
+      extraJS = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          An extra JavaScript file to be loaded by the client.
+        '';
+        type = lib.types.nullOr lib.types.path;
+      };
+
+      extraCSS = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          An extra CSS file to be loaded by the client.
+        '';
+        type = lib.types.nullOr lib.types.path;
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.username or "" != "";
+        message = ''
+          You have to define a username for Honk (`services.honk.username`).
+        '';
+      }
+      {
+        assertion = cfg.servername or "" != "";
+        message = ''
+          You have to define a servername for Honk (`services.honk.servername`).
+        '';
+      }
+    ];
+
+    systemd.services.honk-initdb = {
+      description = "Honk server database setup";
+      requiredBy = [ "honk.service" ];
+      before = [ "honk.service" ];
+
+      serviceConfig = {
+        LoadCredential = [
+          "honk_passwordFile:${cfg.passwordFile}"
+        ];
+        Type = "oneshot";
+        StateDirectory = "honk";
+        DynamicUser = true;
+        RemainAfterExit = true;
+        ExecStart = lib.getExe (honk-initdb-script cfg);
+        PrivateTmp = true;
+      };
+
+      unitConfig = {
+        ConditionPathExists = [
+          # Skip this service if the database already exists
+          "!$STATE_DIRECTORY/honk.db"
+        ];
+      };
+    };
+
+    systemd.services.honk = {
+      description = "Honk server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      bindsTo = [ "honk-initdb.service" ];
+      preStart = ''
+        mkdir -p $STATE_DIRECTORY/views
+        ${lib.optionalString (cfg.extraJS != null) "ln -fs ${cfg.extraJS} $STATE_DIRECTORY/views/local.js"}
+        ${lib.optionalString (cfg.extraCSS != null) "ln -fs ${cfg.extraCSS} $STATE_DIRECTORY/views/local.css"}
+        ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk backup $STATE_DIRECTORY/backup
+        ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk upgrade
+        ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk cleanup
+      '';
+      serviceConfig = {
+        ExecStart = ''
+          ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk
+        '';
+        StateDirectory = "honk";
+        DynamicUser = true;
+        PrivateTmp = "yes";
+        Restart = "on-failure";
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ drupol ];
+    doc = ./honk.md;
+  };
+}
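Beyond the minimal example in honk.md above, a hedged sketch of the optional styling hooks defined in this module; the file paths are placeholders.

```nix
{
  services.honk = {
    enable = true;
    servername = "honk.example.com";
    username = "admin";
    # Placeholder path; must not point into the Nix store.
    passwordFile = "/var/lib/secrets/honk-password";
    # Linked into $STATE_DIRECTORY/views/local.{css,js} by the preStart
    # script above, so a rebuild plus a service restart is enough to restyle.
    extraCSS = ./honk-local.css;
    extraJS = ./honk-local.js;
  };
}
```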
diff --git a/nixos/modules/services/web-apps/lemmy.nix b/nixos/modules/services/web-apps/lemmy.nix
index 895f3a9f1b4b..20d9dcb7c266 100644
--- a/nixos/modules/services/web-apps/lemmy.nix
+++ b/nixos/modules/services/web-apps/lemmy.nix
@@ -160,7 +160,7 @@ in
               root * ${cfg.ui.package}/dist
               file_server
             }
-            handle_path /static/undefined/* {
+            handle_path /static/${cfg.ui.package.passthru.commit_sha}/* {
               root * ${cfg.ui.package}/dist
               file_server
             }
diff --git a/nixos/modules/services/web-apps/meme-bingo-web.nix b/nixos/modules/services/web-apps/meme-bingo-web.nix
new file mode 100644
index 000000000000..cb864321ef27
--- /dev/null
+++ b/nixos/modules/services/web-apps/meme-bingo-web.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkEnableOption mkIf mkOption mdDoc types literalExpression;
+
+  cfg = config.services.meme-bingo-web;
+in {
+  options = {
+    services.meme-bingo-web = {
+      enable = mkEnableOption (mdDoc ''
+        A web app for the meme bingo, rendered entirely on the web server and made interactive with forms.
+
+        Note: The application's author recommends running meme-bingo-web behind a reverse proxy for SSL and HTTP/3.
+      '');
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.meme-bingo-web;
+        defaultText = literalExpression "pkgs.meme-bingo-web";
+        description = mdDoc "meme-bingo-web package to use.";
+      };
+
+      baseUrl = mkOption {
+        description = mdDoc ''
+          URL to be used for the HTML <base> element on all HTML routes.
+        '';
+        type = types.str;
+        default = "http://localhost:41678/";
+        example = "https://bingo.example.com/";
+      };
+      port = mkOption {
+        description = mdDoc ''
+          Port to be used for the web server.
+        '';
+        type = types.port;
+        default = 41678;
+        example = 21035;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.meme-bingo-web = {
+      description = "A web app for playing meme bingos.";
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        MEME_BINGO_BASE = cfg.baseUrl;
+        MEME_BINGO_PORT = toString cfg.port;
+      };
+      path = [ cfg.package ];
+
+      serviceConfig = {
+        User = "meme-bingo-web";
+        Group = "meme-bingo-web";
+
+        DynamicUser = true;
+
+        ExecStart = "${cfg.package}/bin/meme-bingo-web";
+
+        Restart = "always";
+        RestartSec = 1;
+
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DeviceAllow = [ "/dev/random" ];
+        LockPersonality = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectSystem = "strict";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+        UMask = "0077";
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        NoNewPrivileges = true;
+        MemoryDenyWriteExecute = true;
+      };
+    };
+  };
+}
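Since the option description above says the author expects a reverse proxy in front of the service, here is a hedged sketch combining the new options with a plain nginx virtual host; the nginx option names are the standard ones and the domain is a placeholder.

```nix
{
  services.meme-bingo-web = {
    enable = true;
    port = 41678;
    # Must match the public URL served by the reverse proxy.
    baseUrl = "https://bingo.example.com/";
  };

  services.nginx = {
    enable = true;
    virtualHosts."bingo.example.com" = {
      enableACME = true;
      forceSSL = true;
      locations."/".proxyPass = "http://127.0.0.1:41678";
    };
  };
}
```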
diff --git a/nixos/modules/services/web-apps/mobilizon.nix b/nixos/modules/services/web-apps/mobilizon.nix
new file mode 100644
index 000000000000..e9264a38f0e6
--- /dev/null
+++ b/nixos/modules/services/web-apps/mobilizon.nix
@@ -0,0 +1,442 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mobilizon;
+
+  user = "mobilizon";
+  group = "mobilizon";
+
+  settingsFormat = pkgs.formats.elixirConf { elixir = pkgs.elixir_1_14; };
+
+  configFile = settingsFormat.generate "mobilizon-config.exs" cfg.settings;
+
+  # Make a package containing launchers with the correct environment, instead of
+  # setting it in the systemd service, so that the user can also run them directly
+  # without trouble.
+  launchers = pkgs.stdenv.mkDerivation rec {
+    pname = "${cfg.package.pname}-launchers";
+    inherit (cfg.package) version;
+
+    src = cfg.package;
+
+    nativeBuildInputs = with pkgs; [ makeWrapper ];
+
+    dontBuild = true;
+
+    installPhase = ''
+      mkdir -p $out/bin
+
+      makeWrapper \
+        $src/bin/mobilizon \
+        $out/bin/mobilizon \
+        --run '. ${secretEnvFile}' \
+        --set MOBILIZON_CONFIG_PATH "${configFile}" \
+        --set-default RELEASE_TMP "/tmp"
+
+      makeWrapper \
+        $src/bin/mobilizon_ctl \
+        $out/bin/mobilizon_ctl \
+        --run '. ${secretEnvFile}' \
+        --set MOBILIZON_CONFIG_PATH "${configFile}" \
+        --set-default RELEASE_TMP "/tmp"
+    '';
+  };
+
+  repoSettings = cfg.settings.":mobilizon"."Mobilizon.Storage.Repo";
+  instanceSettings = cfg.settings.":mobilizon".":instance";
+
+  isLocalPostgres = repoSettings.socket_dir != null;
+
+  dbUser = if repoSettings.username != null then repoSettings.username else "mobilizon";
+
+  postgresql = config.services.postgresql.package;
+  postgresqlSocketDir = "/var/run/postgresql";
+
+  secretEnvFile = "/var/lib/mobilizon/secret-env.sh";
+in
+{
+  options = {
+    services.mobilizon = {
+      enable = mkEnableOption
+        (lib.mdDoc "Mobilizon federated organization and mobilization platform");
+
+      nginx.enable = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether an Nginx virtual host should be
+          set up to serve Mobilizon.
+        '';
+      };
+
+      package = mkPackageOptionMD pkgs "mobilizon" { };
+
+      settings = mkOption {
+        type =
+          let
+            elixirTypes = settingsFormat.lib.types;
+          in
+          types.submodule {
+            freeformType = settingsFormat.type;
+
+            options = {
+              ":mobilizon" = {
+
+                "Mobilizon.Web.Endpoint" = {
+                  url.host = mkOption {
+                    type = elixirTypes.str;
+                    defaultText = lib.literalMD ''
+                      ''${settings.":mobilizon".":instance".hostname}
+                    '';
+                    description = lib.mdDoc ''
+                      Your instance's hostname for generating URLs throughout the app
+                    '';
+                  };
+
+                  http = {
+                    port = mkOption {
+                      type = elixirTypes.port;
+                      default = 4000;
+                      description = lib.mdDoc ''
+                        The port to run the server
+                      '';
+                    };
+                    ip = mkOption {
+                      type = elixirTypes.tuple;
+                      default = settingsFormat.lib.mkTuple [ 0 0 0 0 0 0 0 1 ];
+                      description = lib.mdDoc ''
+                        The IP address to listen on. Defaults to [::1] notated as a byte tuple.
+                      '';
+                    };
+                  };
+
+                  has_reverse_proxy = mkOption {
+                    type = elixirTypes.bool;
+                    default = true;
+                    description = lib.mdDoc ''
+                      Whether you use a reverse proxy
+                    '';
+                  };
+                };
+
+                ":instance" = {
+                  name = mkOption {
+                    type = elixirTypes.str;
+                    description = lib.mdDoc ''
+                      The fallback instance name if not configured into the admin UI
+                    '';
+                  };
+
+                  hostname = mkOption {
+                    type = elixirTypes.str;
+                    description = lib.mdDoc ''
+                      Your instance's hostname
+                    '';
+                  };
+
+                  email_from = mkOption {
+                    type = elixirTypes.str;
+                    defaultText = literalExpression ''
+                      noreply@''${settings.":mobilizon".":instance".hostname}
+                    '';
+                    description = lib.mdDoc ''
+                      The email for the From: header in emails
+                    '';
+                  };
+
+                  email_reply_to = mkOption {
+                    type = elixirTypes.str;
+                    defaultText = literalExpression ''
+                      ''${email_from}
+                    '';
+                    description = lib.mdDoc ''
+                      The email for the Reply-To: header in emails
+                    '';
+                  };
+                };
+
+                "Mobilizon.Storage.Repo" = {
+                  socket_dir = mkOption {
+                    type = types.nullOr elixirTypes.str;
+                    default = postgresqlSocketDir;
+                    description = lib.mdDoc ''
+                      Path to the postgres socket directory.
+
+                      Set this to null if you want to connect to a remote database.
+
+                      If non-null, the local PostgreSQL server will be configured with
+                      the configured database, permissions, and required extensions.
+
+                      If connecting to a remote database, please follow the
+                      instructions on how to setup your database:
+                      <https://docs.joinmobilizon.org/administration/install/release/#database-setup>
+                    '';
+                  };
+
+                  username = mkOption {
+                    type = types.nullOr elixirTypes.str;
+                    default = user;
+                    description = lib.mdDoc ''
+                      User used to connect to the database
+                    '';
+                  };
+
+                  database = mkOption {
+                    type = types.nullOr elixirTypes.str;
+                    default = "mobilizon_prod";
+                    description = lib.mdDoc ''
+                      Name of the database
+                    '';
+                  };
+                };
+              };
+            };
+          };
+        default = { };
+
+        description = lib.mdDoc ''
+          Mobilizon Elixir documentation, see
+          <https://docs.joinmobilizon.org/administration/configure/reference/>
+          for supported values.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = cfg.nginx.enable -> (cfg.settings.":mobilizon"."Mobilizon.Web.Endpoint".http.ip == settingsFormat.lib.mkTuple [ 0 0 0 0 0 0 0 1 ]);
+        message = "Setting the IP mobilizon listens on is only possible when the nginx config is not used, as it is hardcoded there.";
+      }
+    ];
+
+    services.mobilizon.settings = {
+      ":mobilizon" = {
+        "Mobilizon.Web.Endpoint" = {
+          server = true;
+          url.host = mkDefault instanceSettings.hostname;
+          secret_key_base =
+            settingsFormat.lib.mkGetEnv { envVariable = "MOBILIZON_INSTANCE_SECRET"; };
+        };
+
+        "Mobilizon.Web.Auth.Guardian".secret_key =
+          settingsFormat.lib.mkGetEnv { envVariable = "MOBILIZON_AUTH_SECRET"; };
+
+        ":instance" = {
+          registrations_open = mkDefault false;
+          demo = mkDefault false;
+          email_from = mkDefault "noreply@${instanceSettings.hostname}";
+          email_reply_to = mkDefault instanceSettings.email_from;
+        };
+
+        "Mobilizon.Storage.Repo" = {
+          # Forced by upstream since it uses PostgreSQL-specific extensions
+          adapter = settingsFormat.lib.mkAtom "Ecto.Adapters.Postgres";
+          pool_size = mkDefault 10;
+        };
+      };
+
+      ":tzdata".":data_dir" = "/var/lib/mobilizon/tzdata/";
+    };
+
+    # This somewhat follows upstream's systemd service here:
+    # https://framagit.org/framasoft/mobilizon/-/blob/master/support/systemd/mobilizon.service
+    systemd.services.mobilizon = {
+      description = "Mobilizon federated organization and mobilization platform";
+
+      wantedBy = [ "multi-user.target" ];
+
+      path = with pkgs; [
+        gawk
+        imagemagick
+        libwebp
+        file
+
+        # Optional:
+        gifsicle
+        jpegoptim
+        optipng
+        pngquant
+      ];
+
+      serviceConfig = {
+        ExecStartPre = "${launchers}/bin/mobilizon_ctl migrate";
+        ExecStart = "${launchers}/bin/mobilizon start";
+        ExecStop = "${launchers}/bin/mobilizon stop";
+
+        User = user;
+        Group = group;
+
+        StateDirectory = "mobilizon";
+
+        Restart = "on-failure";
+
+        PrivateTmp = true;
+        ProtectSystem = "full";
+        NoNewPrivileges = true;
+
+        ReadWritePaths = mkIf isLocalPostgres postgresqlSocketDir;
+      };
+    };
+
+    # Create the needed secrets before running Mobilizon, so that they are not
+    # in the nix store
+    #
+    # Since some of these tasks are quite common for Elixir projects (COOKIE for
+    # every BEAM project, Phoenix and Guardian are also quite common), this
+    # service could be abstracted in the future, and used by other Elixir
+    # projects.
+    systemd.services.mobilizon-setup-secrets = {
+      description = "Mobilizon setup secrets";
+      before = [ "mobilizon.service" ];
+      wantedBy = [ "mobilizon.service" ];
+
+      script =
+        let
+          # Taken from here:
+          # https://framagit.org/framasoft/mobilizon/-/blob/1.0.7/lib/mix/tasks/mobilizon/instance.ex#L132-133
+          genSecret =
+            "IO.puts(:crypto.strong_rand_bytes(64)" +
+            "|> Base.encode64()" +
+            "|> binary_part(0, 64))";
+
+          # Taken from here:
+          # https://github.com/elixir-lang/elixir/blob/v1.11.3/lib/mix/lib/mix/release.ex#L499
+          genCookie = "IO.puts(Base.encode32(:crypto.strong_rand_bytes(32)))";
+
+          evalElixir = str: ''
+            ${pkgs.elixir_1_14}/bin/elixir --eval '${str}'
+          '';
+        in
+        ''
+          set -euxo pipefail
+
+          if [ ! -f "${secretEnvFile}" ]; then
+            install -m 600 /dev/null "${secretEnvFile}"
+            cat > "${secretEnvFile}" <<EOF
+          # This file was automatically generated by mobilizon-setup-secrets.service
+          export MOBILIZON_AUTH_SECRET='$(${evalElixir genSecret})'
+          export MOBILIZON_INSTANCE_SECRET='$(${evalElixir genSecret})'
+          export RELEASE_COOKIE='$(${evalElixir genCookie})'
+          EOF
+          fi
+        '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = user;
+        Group = group;
+        StateDirectory = "mobilizon";
+      };
+    };
+
+    # Add the required PostgreSQL extensions to the local PostgreSQL server,
+    # if local PostgreSQL is configured.
+    systemd.services.mobilizon-postgresql = mkIf isLocalPostgres {
+      description = "Mobilizon PostgreSQL setup";
+
+      after = [ "postgresql.service" ];
+      before = [ "mobilizon.service" "mobilizon-setup-secrets.service" ];
+      wantedBy = [ "mobilizon.service" ];
+
+      path = [ postgresql ];
+
+      # Taken from here:
+      # https://framagit.org/framasoft/mobilizon/-/blob/1.1.0/priv/templates/setup_db.eex
+      script =
+        ''
+          psql "${repoSettings.database}" -c "\
+            CREATE EXTENSION IF NOT EXISTS postgis; \
+            CREATE EXTENSION IF NOT EXISTS pg_trgm; \
+            CREATE EXTENSION IF NOT EXISTS unaccent;"
+        '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = config.services.postgresql.superUser;
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d /var/lib/mobilizon/uploads/exports/csv 700 mobilizon mobilizon - -"
+      "Z /var/lib/mobilizon 700 mobilizon mobilizon - -"
+    ];
+
+    services.postgresql = mkIf isLocalPostgres {
+      enable = true;
+      ensureDatabases = [ repoSettings.database ];
+      ensureUsers = [
+        {
+          name = dbUser;
+          ensurePermissions = {
+            "DATABASE \"${repoSettings.database}\"" = "ALL PRIVILEGES";
+          };
+        }
+      ];
+      extraPlugins = with postgresql.pkgs; [ postgis ];
+    };
+
+    # Nginx config taken from support/nginx/mobilizon-release.conf
+    services.nginx =
+      let
+        inherit (cfg.settings.":mobilizon".":instance") hostname;
+        proxyPass = "http://[::1]:"
+          + toString cfg.settings.":mobilizon"."Mobilizon.Web.Endpoint".http.port;
+      in
+      lib.mkIf cfg.nginx.enable {
+        enable = true;
+        virtualHosts."${hostname}" = {
+          enableACME = lib.mkDefault true;
+          forceSSL = lib.mkDefault true;
+          extraConfig = ''
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "upgrade";
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $scheme;
+          '';
+          locations."/" = {
+            inherit proxyPass;
+          };
+          locations."~ ^/(js|css|img)" = {
+            root = "${cfg.package}/lib/mobilizon-${cfg.package.version}/priv/static";
+            extraConfig = ''
+              etag off;
+              access_log off;
+              add_header Cache-Control "public, max-age=31536000, immutable";
+            '';
+          };
+          locations."~ ^/(media|proxy)" = {
+            inherit proxyPass;
+            extraConfig = ''
+              etag off;
+              access_log off;
+              add_header Cache-Control "public, max-age=31536000, immutable";
+            '';
+          };
+        };
+      };
+
+    users.users.${user} = {
+      description = "Mobilizon daemon user";
+      group = group;
+      isSystemUser = true;
+    };
+
+    users.groups.${group} = { };
+
+    # So that we have the `mobilizon` and `mobilizon_ctl` commands.
+    # The `mobilizon remote` command is useful for dropping a shell into the
+    # running Mobilizon instance, and `mobilizon_ctl` is used for common
+    # management tasks (e.g. adding users).
+    environment.systemPackages = [ launchers ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ minijackson erictapen ];
+}
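A hedged minimal configuration sketch using the options defined above: only `name` and `hostname` have no defaults, while everything else falls back to the module's defaults (local PostgreSQL over the Unix socket, nginx virtual host, generated secrets). The hostname is a placeholder.

```nix
{
  services.mobilizon = {
    enable = true;
    settings.":mobilizon".":instance" = {
      name = "My Mobilizon";
      hostname = "events.example.com";
      # Email defaults are derived from the hostname (see the mkDefault
      # values above) and can be overridden here if needed.
    };
  };
}
```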
diff --git a/nixos/modules/services/web-apps/netbox.nix b/nixos/modules/services/web-apps/netbox.nix
index 5f42f42a9af9..6d89ffc2a7b7 100644
--- a/nixos/modules/services/web-apps/netbox.nix
+++ b/nixos/modules/services/web-apps/netbox.nix
@@ -169,6 +169,13 @@ in {
         AUTH_LDAP_FIND_GROUP_PERMS = True
       '';
     };
+    keycloakClientSecret = lib.mkOption {
+      type = with lib.types; nullOr path;
+      default = null;
+      description = lib.mdDoc ''
+        File that contains the keycloak client secret.
+      '';
+    };
   };
 
   config = lib.mkIf cfg.enable {
@@ -227,7 +234,10 @@ in {
       extraConfig = ''
         with open("${cfg.secretKeyFile}", "r") as file:
             SECRET_KEY = file.readline()
-      '';
+      '' + (lib.optionalString (cfg.keycloakClientSecret != null) ''
+        with open("${cfg.keycloakClientSecret}", "r") as file:
+            SOCIAL_AUTH_KEYCLOAK_SECRET = file.readline()
+      '');
     };
 
     services.redis.servers.netbox.enable = true;
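A hedged sketch of the new `keycloakClientSecret` option next to the existing `secretKeyFile`; both paths are placeholders that should live outside the Nix store.

```nix
{
  services.netbox = {
    enable = true;
    secretKeyFile = "/run/secrets/netbox-secret-key";
    # The first line of this file becomes SOCIAL_AUTH_KEYCLOAK_SECRET in
    # NetBox's extraConfig, as wired up above.
    keycloakClientSecret = "/run/secrets/netbox-keycloak-client";
  };
}
```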
diff --git a/nixos/modules/services/web-apps/plausible.nix b/nixos/modules/services/web-apps/plausible.nix
index 911daa53e658..e2d5cdc4f7c7 100644
--- a/nixos/modules/services/web-apps/plausible.nix
+++ b/nixos/modules/services/web-apps/plausible.nix
@@ -248,10 +248,10 @@ in {
             # setup
             ${cfg.package}/createdb.sh
             ${cfg.package}/migrate.sh
+            export IP_GEOLOCATION_DB=${pkgs.dbip-country-lite}/share/dbip/dbip-country-lite.mmdb
+            ${cfg.package}/bin/plausible eval "(Plausible.Release.prepare() ; Plausible.Auth.create_user(\"$ADMIN_USER_NAME\", \"$ADMIN_USER_EMAIL\", \"$ADMIN_USER_PWD\"))"
             ${optionalString cfg.adminUser.activate ''
-              if ! ${cfg.package}/init-admin.sh | grep 'already exists'; then
-                psql -d plausible <<< "UPDATE users SET email_verified=true;"
-              fi
+              psql -d plausible <<< "UPDATE users SET email_verified=true where email = '$ADMIN_USER_EMAIL';"
             ''}
 
             exec plausible start
diff --git a/nixos/modules/services/web-apps/tt-rss.nix b/nixos/modules/services/web-apps/tt-rss.nix
index 3102e6a46953..592ab253f7da 100644
--- a/nixos/modules/services/web-apps/tt-rss.nix
+++ b/nixos/modules/services/web-apps/tt-rss.nix
@@ -595,47 +595,9 @@ let
       tt-rss = {
         description = "Tiny Tiny RSS feeds update daemon";
 
-        preStart = let
-          callSql = e:
-              if cfg.database.type == "pgsql" then ''
-                  ${optionalString (cfg.database.password != null) "PGPASSWORD=${cfg.database.password}"} \
-                  ${optionalString (cfg.database.passwordFile != null) "PGPASSWORD=$(cat ${cfg.database.passwordFile})"} \
-                  ${config.services.postgresql.package}/bin/psql \
-                    -U ${cfg.database.user} \
-                    ${optionalString (cfg.database.host != null) "-h ${cfg.database.host} --port ${toString dbPort}"} \
-                    -c '${e}' \
-                    ${cfg.database.name}''
-
-              else if cfg.database.type == "mysql" then ''
-                  echo '${e}' | ${config.services.mysql.package}/bin/mysql \
-                    -u ${cfg.database.user} \
-                    ${optionalString (cfg.database.password != null) "-p${cfg.database.password}"} \
-                    ${optionalString (cfg.database.host != null) "-h ${cfg.database.host} -P ${toString dbPort}"} \
-                    ${cfg.database.name}''
-
-              else "";
-
-        in (optionalString (cfg.database.type == "pgsql") ''
-          exists=$(${callSql "select count(*) > 0 from pg_tables where tableowner = user"} \
-          | tail -n+3 | head -n-2 | sed -e 's/[ \n\t]*//')
-
-          if [ "$exists" == 'f' ]; then
-            ${callSql "\\i ${pkgs.tt-rss}/schema/ttrss_schema_${cfg.database.type}.sql"}
-          else
-            echo 'The database contains some data. Leaving it as it is.'
-          fi;
-        '')
-
-        + (optionalString (cfg.database.type == "mysql") ''
-          exists=$(${callSql "select count(*) > 0 from information_schema.tables where table_schema = schema()"} \
-          | tail -n+2 | sed -e 's/[ \n\t]*//')
-
-          if [ "$exists" == '0' ]; then
-            ${callSql "\\. ${pkgs.tt-rss}/schema/ttrss_schema_${cfg.database.type}.sql"}
-          else
-            echo 'The database contains some data. Leaving it as it is.'
-          fi;
-        '');
+        preStart = ''
+          ${pkgs.php81}/bin/php ${cfg.root}/www/update.php --update-schema
+        '';
 
         serviceConfig = {
           User = "${cfg.user}";
diff --git a/nixos/modules/services/web-apps/vikunja.nix b/nixos/modules/services/web-apps/vikunja.nix
index 8bc8e8c29259..6b1d4da532bf 100644
--- a/nixos/modules/services/web-apps/vikunja.nix
+++ b/nixos/modules/services/web-apps/vikunja.nix
@@ -147,5 +147,9 @@ in {
     };
 
     environment.etc."vikunja/config.yaml".source = configFile;
+
+    environment.systemPackages = [
+      cfg.package-api # for admin `vikunja` CLI
+    ];
   };
 }
diff --git a/nixos/modules/services/web-servers/caddy/default.nix b/nixos/modules/services/web-servers/caddy/default.nix
index cec0b379f67a..ce74e243a181 100644
--- a/nixos/modules/services/web-servers/caddy/default.nix
+++ b/nixos/modules/services/web-servers/caddy/default.nix
@@ -36,6 +36,7 @@ let
             ${cfg.globalConfig}
           }
           ${cfg.extraConfig}
+          ${concatMapStringsSep "\n" mkVHostConf virtualHosts}
         '';
 
         Caddyfile-formatted = pkgs.runCommand "Caddyfile-formatted" { nativeBuildInputs = [ cfg.package ]; } ''
@@ -340,7 +341,6 @@ in
       groups = config.users.groups;
     }) acmeHosts;
 
-    services.caddy.extraConfig = concatMapStringsSep "\n" mkVHostConf virtualHosts;
     services.caddy.globalConfig = ''
       ${optionalString (cfg.email != null) "email ${cfg.email}"}
       ${optionalString (cfg.acmeCA != null) "acme_ca ${cfg.acmeCA}"}
diff --git a/nixos/modules/services/web-servers/garage.nix b/nixos/modules/services/web-servers/garage.nix
index 8b5734b5a2ce..80fb24fe2c5e 100644
--- a/nixos/modules/services/web-servers/garage.nix
+++ b/nixos/modules/services/web-servers/garage.nix
@@ -23,6 +23,12 @@ in
       example = { RUST_BACKTRACE="yes"; };
     };
 
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      description = lib.mdDoc "File containing environment variables to be passed to the Garage server.";
+      default = null;
+    };
+
     logLevel = mkOption {
       type = types.enum (["info" "debug" "trace"]);
       default = "info";
@@ -80,7 +86,7 @@ in
       after = [ "network.target" "network-online.target" ];
       wants = [ "network.target" "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
-      restartTriggers = [ configFile ];
+      restartTriggers = [ configFile ] ++ (lib.optional (cfg.environmentFile != null) cfg.environmentFile);
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/garage server";
 
@@ -88,6 +94,7 @@ in
         DynamicUser = lib.mkDefault true;
         ProtectHome = true;
         NoNewPrivileges = true;
+        EnvironmentFile = lib.optional (cfg.environmentFile != null) cfg.environmentFile;
       };
       environment = {
         RUST_LOG = lib.mkDefault "garage=${cfg.logLevel}";
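For reference, a minimal sketch of the new `environmentFile` option (path and variable names are only placeholders; Garage is assumed to read secrets such as GARAGE_RPC_SECRET from its environment):

    services.garage.environmentFile = "/run/secrets/garage.env";
    # garage.env is kept out of the Nix store and might contain, e.g.:
    #   GARAGE_RPC_SECRET=<hex-encoded shared secret>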
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index fccc31b5116c..7a7fb4061eea 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -578,11 +578,13 @@ in
           };
         });
         default = [];
-        example = literalExpression ''[
-          { addr = "10.0.0.12"; proxyProtocol = true; ssl = true; }
-          { addr = "0.0.0.0"; }
-          { addr = "[::0]"; }
-        ]'';
+        example = literalExpression ''
+          [
+            { addr = "10.0.0.12"; proxyProtocol = true; ssl = true; }
+            { addr = "0.0.0.0"; }
+            { addr = "[::0]"; }
+          ]
+        '';
         description = lib.mdDoc ''
           If vhosts do not specify listen, use these addresses by default.
           This option takes precedence over {option}`defaultListenAddresses` and
diff --git a/nixos/modules/services/x11/desktop-managers/budgie.nix b/nixos/modules/services/x11/desktop-managers/budgie.nix
index bee627ec76c0..a4f8bd5051ec 100644
--- a/nixos/modules/services/x11/desktop-managers/budgie.nix
+++ b/nixos/modules/services/x11/desktop-managers/budgie.nix
@@ -134,6 +134,7 @@ in {
         # Update user directories.
         xdg-user-dirs
       ]
+      ++ lib.optional config.networking.networkmanager.enable pkgs.networkmanagerapplet
       ++ (utils.removePackagesByName [
           cinnamon.nemo
           mate.eom
@@ -192,7 +193,7 @@ in {
     # Required by Budgie Panel plugins and/or Budgie Control Center panels.
     networking.networkmanager.enable = mkDefault true; # for BCC's Network panel.
     programs.nm-applet.enable = config.networking.networkmanager.enable; # Budgie has no Network applet.
-    programs.nm-applet.indicator = false; # Budgie doesn't support AppIndicators.
+    programs.nm-applet.indicator = true; # Budgie uses AppIndicators.
 
     hardware.bluetooth.enable = mkDefault true; # for Budgie's Status Indicator and BCC's Bluetooth panel.
     hardware.pulseaudio.enable = mkDefault true; # for Budgie's Status Indicator and BCC's Sound panel.
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index 15a510fd8f96..282a34f6b011 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -172,24 +172,19 @@ in
     (mkIf (cfg.enable || cfg.mobile.enable || cfg.bigscreen.enable) {
 
       security.wrappers = {
-        kscreenlocker_greet = {
-          setuid = true;
+        kwin_wayland = {
           owner = "root";
           group = "root";
-          source = "${getBin libsForQt5.kscreenlocker}/libexec/kscreenlocker_greet";
+          capabilities = "cap_sys_nice+ep";
+          source = "${getBin plasma5.kwin}/bin/kwin_wayland";
         };
+      } // optionalAttrs (!cfg.runUsingSystemd) {
         start_kdeinit = {
           setuid = true;
           owner = "root";
           group = "root";
           source = "${getBin libsForQt5.kinit}/libexec/kf5/start_kdeinit";
         };
-        kwin_wayland = {
-          owner = "root";
-          group = "root";
-          capabilities = "cap_sys_nice+ep";
-          source = "${getBin plasma5.kwin}/bin/kwin_wayland";
-        };
       };
 
       environment.systemPackages =
diff --git a/nixos/modules/services/x11/display-managers/gdm.nix b/nixos/modules/services/x11/display-managers/gdm.nix
index 676d08b93e2c..400e5601dc59 100644
--- a/nixos/modules/services/x11/display-managers/gdm.nix
+++ b/nixos/modules/services/x11/display-managers/gdm.nix
@@ -97,6 +97,19 @@ in
         type = types.bool;
       };
 
+      banner = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        example = ''
+          foo
+          bar
+          baz
+        '';
+        description = lib.mdDoc ''
+          Optional message to display on the login screen.
+        '';
+      };
+
       settings = mkOption {
         type = settingsFormat.type;
         default = { };
@@ -231,40 +244,19 @@ in
 
     systemd.user.services.dbus.wantedBy = [ "default.target" ];
 
-    programs.dconf.profiles.gdm =
-    let
-      customDconf = pkgs.writeTextFile {
-        name = "gdm-dconf";
-        destination = "/dconf/gdm-custom";
-        text = ''
-          ${optionalString (!cfg.gdm.autoSuspend) ''
-            [org/gnome/settings-daemon/plugins/power]
-            sleep-inactive-ac-type='nothing'
-            sleep-inactive-battery-type='nothing'
-            sleep-inactive-ac-timeout=0
-            sleep-inactive-battery-timeout=0
-          ''}
-        '';
+    programs.dconf.profiles.gdm.databases = lib.optionals (!cfg.gdm.autoSuspend) [{
+      settings."org/gnome/settings-daemon/plugins/power" = {
+        sleep-inactive-ac-type = "nothing";
+        sleep-inactive-battery-type = "nothing";
+        sleep-inactive-ac-timeout = lib.gvariant.mkInt32 0;
+        sleep-inactive-battery-timeout = lib.gvariant.mkInt32 0;
       };
-
-      customDconfDb = pkgs.stdenv.mkDerivation {
-        name = "gdm-dconf-db";
-        buildCommand = ''
-          ${pkgs.dconf}/bin/dconf compile $out ${customDconf}/dconf
-        '';
+    }] ++ lib.optionals (cfg.gdm.banner != null) [{
+      settings."org/gnome/login-screen" = {
+        banner-message-enable = true;
+        banner-message-text = cfg.gdm.banner;
       };
-    in pkgs.stdenv.mkDerivation {
-      name = "dconf-gdm-profile";
-      buildCommand = ''
-        # Check that the GDM profile starts with what we expect.
-        if [ $(head -n 1 ${gdm}/share/dconf/profile/gdm) != "user-db:user" ]; then
-          echo "GDM dconf profile changed, please update gdm.nix"
-          exit 1
-        fi
-        # Insert our custom DB behind it.
-        sed '2ifile-db:${customDconfDb}' ${gdm}/share/dconf/profile/gdm > $out
-      '';
-    };
+    }] ++ [ "${gdm}/share/gdm/greeter-dconf-defaults" ];
 
     # Use AutomaticLogin if delay is zero, because it's immediate.
     # Otherwise with TimedLogin with zero seconds the prompt is still
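A usage sketch for the new `banner` option:

    services.xserver.displayManager.gdm.banner = ''
      Authorized access only.
    '';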
diff --git a/nixos/modules/services/x11/display-managers/sddm.nix b/nixos/modules/services/x11/display-managers/sddm.nix
index c04edd0d4b7a..47e60236eaeb 100644
--- a/nixos/modules/services/x11/display-managers/sddm.nix
+++ b/nixos/modules/services/x11/display-managers/sddm.nix
@@ -267,6 +267,7 @@ in
 
     environment.systemPackages = [ sddm ];
     services.dbus.packages = [ sddm ];
+    systemd.tmpfiles.packages = [ sddm ];
 
     # We're not using the upstream unit, so copy these: https://github.com/sddm/sddm/blob/develop/services/sddm.service.in
     systemd.services.display-manager.after = [
diff --git a/nixos/modules/services/x11/window-managers/default.nix b/nixos/modules/services/x11/window-managers/default.nix
index ce1d4115f225..e180f2693e0c 100644
--- a/nixos/modules/services/x11/window-managers/default.nix
+++ b/nixos/modules/services/x11/window-managers/default.nix
@@ -35,6 +35,7 @@ in
     ./openbox.nix
     ./pekwm.nix
     ./notion.nix
+    ./ragnarwm.nix
     ./ratpoison.nix
     ./sawfish.nix
     ./smallwm.nix
diff --git a/nixos/modules/services/x11/window-managers/oroborus.nix b/nixos/modules/services/x11/window-managers/oroborus.nix
deleted file mode 100644
index 654b8708e48f..000000000000
--- a/nixos/modules/services/x11/window-managers/oroborus.nix
+++ /dev/null
@@ -1,25 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-  cfg = config.services.xserver.windowManager.oroborus;
-in
-{
-  ###### interface
-  options = {
-    services.xserver.windowManager.oroborus.enable = mkEnableOption (lib.mdDoc "oroborus");
-  };
-
-  ###### implementation
-  config = mkIf cfg.enable {
-    services.xserver.windowManager.session = singleton {
-      name = "oroborus";
-      start = ''
-        ${pkgs.oroborus}/bin/oroborus &
-        waitPID=$!
-      '';
-    };
-    environment.systemPackages = [ pkgs.oroborus ];
-  };
-}
diff --git a/nixos/modules/services/x11/window-managers/ragnarwm.nix b/nixos/modules/services/x11/window-managers/ragnarwm.nix
new file mode 100644
index 000000000000..0843b872dba5
--- /dev/null
+++ b/nixos/modules/services/x11/window-managers/ragnarwm.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.ragnarwm;
+in
+{
+  ###### interface
+
+  options = {
+    services.xserver.windowManager.ragnarwm = {
+      enable = mkEnableOption (lib.mdDoc "ragnarwm");
+      package = mkOption {
+        type = types.package;
+        default = pkgs.ragnarwm;
+        defaultText = literalExpression "pkgs.ragnarwm";
+        description = lib.mdDoc ''
+          The ragnar package to use.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    services.xserver.displayManager.sessionPackages = [ cfg.package ];
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ sigmanificient ];
+}
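A sketch of enabling the new module from a system configuration:

    services.xserver.enable = true;
    services.xserver.windowManager.ragnarwm.enable = true;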
diff --git a/nixos/modules/system/activation/switch-to-configuration.pl b/nixos/modules/system/activation/switch-to-configuration.pl
index 04d90968c4c1..e05f89bb0fb4 100755
--- a/nixos/modules/system/activation/switch-to-configuration.pl
+++ b/nixos/modules/system/activation/switch-to-configuration.pl
@@ -74,7 +74,7 @@ if ("@localeArchive@" ne "") {
 
 if (!defined($action) || ($action ne "switch" && $action ne "boot" && $action ne "test" && $action ne "dry-activate")) {
     print STDERR <<"EOF";
-Usage: $0 [switch|boot|test]
+Usage: $0 [switch|boot|test|dry-activate]
 
 switch:       make the configuration the boot default and activate now
 boot:         make the configuration the boot default
@@ -313,7 +313,8 @@ sub unrecord_unit {
 # needs to be restarted or reloaded. If the units differ, the service
 # is restarted unless the only difference is `X-Reload-Triggers` in the
 # `Unit` section. If this is the only modification, the unit is reloaded
-# instead of restarted.
+# instead of restarted. If the only difference is `Options` in the
+# `[Mount]` section, the unit is reloaded rather than restarted.
 # Returns:
 # - 0 if the units are equal
 # - 1 if the units are different and a restart action is required
@@ -390,6 +391,11 @@ sub compare_units { ## no critic(Subroutines::ProhibitExcessComplexity)
                         next;
                     }
                 }
+                # If this is a mount unit, check if it was only `Options` that changed
+                if ($section_name eq "Mount" and $ini_key eq "Options") {
+                    $ret = 2;
+                    next;
+                }
                 return 1;
             }
         }
@@ -440,10 +446,18 @@ sub handle_modified_unit { ## no critic(Subroutines::ProhibitManyArgs, Subroutin
         # properties (resource limits and inotify watches)
         # seem to get applied on daemon-reload.
     } elsif ($unit =~ /\.mount$/msx) {
-        # Reload the changed mount unit to force a remount.
-        # FIXME: only reload when Options= changed, restart otherwise
-        $units_to_reload->{$unit} = 1;
-        record_unit($reload_list_file, $unit);
+        # Just restart the unit. We wouldn't have gotten into this subroutine
+        # if only `Options` was changed, in which case the unit would be reloaded.
+        # The only exceptions are / and /nix, because it's very unlikely we can safely
+        # unmount them, so we reload them instead. This means that we may not get
+        # all changes into the running system, but that's better than crashing it.
+        if ($unit eq "-.mount" or $unit eq "nix.mount") {
+            $units_to_reload->{$unit} = 1;
+            record_unit($reload_list_file, $unit);
+        } else {
+            $units_to_restart->{$unit} = 1;
+            record_unit($restart_list_file, $unit);
+        }
     } elsif ($unit =~ /\.socket$/msx) {
         # FIXME: do something?
         # Attempt to fix this: https://github.com/NixOS/nixpkgs/pull/141192
@@ -647,10 +661,20 @@ foreach my $mount_point (keys(%{$cur_fss})) {
         # Filesystem entry disappeared, so unmount it.
         $units_to_stop{$unit} = 1;
     } elsif ($cur->{fsType} ne $new->{fsType} || $cur->{device} ne $new->{device}) {
-        # Filesystem type or device changed, so unmount and mount it.
-        $units_to_stop{$unit} = 1;
-        $units_to_start{$unit} = 1;
-        record_unit($start_list_file, $unit);
+        if ($mount_point eq '/' or $mount_point eq '/nix') {
+            if ($cur->{options} ne $new->{options}) {
+                # Mount options changed, so remount it.
+                $units_to_reload{$unit} = 1;
+                record_unit($reload_list_file, $unit);
+            } else {
+                # Don't unmount / or /nix if the device changed
+                $units_to_skip{$unit} = 1;
+            }
+        } else {
+            # Filesystem type or device changed, so unmount and mount it.
+            $units_to_restart{$unit} = 1;
+            record_unit($restart_list_file, $unit);
+        }
     } elsif ($cur->{options} ne $new->{options}) {
         # Mount options changes, so remount it.
         $units_to_reload{$unit} = 1;
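To illustrate the new behaviour: a change that only touches mount options, such as

    fileSystems."/data".options = [ "noatime" ];    # "/data" is only an example

is applied by reloading (remounting) `data.mount`, while a changed device or fsType now restarts the unit; only `/` and `/nix` keep the reload-only treatment.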
diff --git a/nixos/modules/system/boot/binfmt.nix b/nixos/modules/system/boot/binfmt.nix
index 5172371d0afb..8c9483f01c10 100644
--- a/nixos/modules/system/boot/binfmt.nix
+++ b/nixos/modules/system/boot/binfmt.nix
@@ -102,20 +102,28 @@ let
       mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\x00'';
     };
     mips-linux = {
-      magicOrExtension = ''\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08'';
-      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'';
+      magicOrExtension = ''\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20'';
     };
     mipsel-linux = {
-      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00'';
-      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00'';
     };
     mips64-linux = {
       magicOrExtension = ''\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08'';
-      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'';
     };
     mips64el-linux = {
       magicOrExtension = ''\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00'';
-      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+    };
+    mips64-linuxabin32 = {
+      magicOrExtension = ''\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20'';
+    };
+    mips64el-linuxabin32 = {
+      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00'';
     };
     riscv32-linux = {
       magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xf3\x00'';
@@ -271,7 +279,7 @@ in {
           support your new systems.
           Warning: the builder can execute all emulated systems within the same build, which introduces impurities in the case of cross compilation.
         '';
-        type = types.listOf types.str;
+        type = types.listOf (types.enum (builtins.attrNames magics));
       };
     };
   };
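With the stricter type, only systems for which a binfmt magic is defined are accepted, e.g.:

    boot.binfmt.emulatedSystems = [ "aarch64-linux" "riscv64-linux" ];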
diff --git a/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.sh b/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.sh
index 1a0da0050291..84a0a93ded17 100644
--- a/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.sh
+++ b/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.sh
@@ -70,13 +70,33 @@ copyToKernelsDir() {
 addEntry() {
     local path=$(readlink -f "$1")
     local tag="$2" # Generation number or 'default'
+    local current="$3" # whether this is the current/latest generation
 
     if ! test -e $path/kernel -a -e $path/initrd; then
         return
     fi
 
+    if test -e "$path/append-initrd-secrets"; then
+        local initrd="$target/nixos/$(basename "$path")-initramfs-with-secrets"
+        cp $(readlink -f "$path/initrd") "$initrd"
+        chmod 600 "${initrd}"
+        chown 0:0 "${initrd}"
+        filesCopied[$initrd]=1
+
+        "$path/append-initrd-secrets" "$initrd" || if test "${current}" = "1"; then
+            echo "failed to create initrd secrets for the current generation." >&2
+            echo "are your \`boot.initrd.secrets\` still in place?" >&2
+            exit 1
+        else
+            echo "warning: failed to create initrd secrets for \"$path\", an older generation" >&2
+            echo "note: this is normal after having removed or renamed a file in \`boot.initrd.secrets\`" >&2
+        fi
+    else
+        copyToKernelsDir "$path/initrd"; initrd=$result
+    fi
+
     copyToKernelsDir "$path/kernel"; kernel=$result
-    copyToKernelsDir "$path/initrd"; initrd=$result
+
     dtbDir=$(readlink -m "$path/dtbs")
     if [ -e "$dtbDir" ]; then
         copyToKernelsDir "$dtbDir"; dtbs=$result
@@ -130,18 +150,20 @@ MENU TITLE ------------------------------------------------------------
 TIMEOUT $timeout
 EOF
 
-addEntry $default default >> $tmpFile
+addEntry $default default 1 >> $tmpFile
 
 if [ "$numGenerations" -gt 0 ]; then
     # Add up to $numGenerations generations of the system profile to the menu,
     # in reverse (most recent to least recent) order.
+    current=1
     for generation in $(
             (cd /nix/var/nix/profiles && ls -d system-*-link) \
             | sed 's/system-\([0-9]\+\)-link/\1/' \
             | sort -n -r \
             | head -n $numGenerations); do
         link=/nix/var/nix/profiles/system-$generation-link
-        addEntry $link $generation
+        addEntry $link $generation $current
+        current=0
     done >> $tmpFile
 fi
 
diff --git a/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix b/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix
index 9c9bee93de8a..c64ef092667b 100644
--- a/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix
+++ b/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix
@@ -142,6 +142,7 @@ in
         assertion = !pkgs.stdenv.hostPlatform.isAarch64 || cfg.version >= 3;
         message = "Only Raspberry Pi >= 3 supports aarch64.";
       };
+      boot.loader.supportsInitrdSecrets = cfg.uboot.enable;
 
       system.build.installBootLoader = builder;
       system.boot.loader.id = "raspberrypi";
diff --git a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
index 1770f0759434..d9a1535ffc7d 100644
--- a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
+++ b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
@@ -147,7 +147,7 @@ in {
         default = false;
         type = types.bool;
         description = lib.mdDoc ''
-          Make MemTest86+ available from the systemd-boot menu. MemTest86+ is a
+          Make Memtest86+ available from the systemd-boot menu. Memtest86+ is a
           program for testing memory.
         '';
       };
@@ -191,7 +191,7 @@ in {
       default = {};
       example = literalExpression ''
         { "memtest86.conf" = '''
-          title MemTest86+
+          title Memtest86+
           efi /efi/memtest86/memtest.efi
         '''; }
       '';
@@ -285,7 +285,7 @@ in {
     boot.loader.systemd-boot.extraEntries = mkMerge [
       (mkIf cfg.memtest86.enable {
         "${cfg.memtest86.entryFilename}" = ''
-          title  MemTest86
+          title  Memtest86+
           efi    /efi/memtest86/memtest.efi
         '';
       })
diff --git a/nixos/modules/system/boot/luksroot.nix b/nixos/modules/system/boot/luksroot.nix
index dc3fe163116e..06c329e006b8 100644
--- a/nixos/modules/system/boot/luksroot.nix
+++ b/nixos/modules/system/boot/luksroot.nix
@@ -351,6 +351,12 @@ let
 
         new_response="$(ykchalresp -${toString dev.yubikey.slot} -x $new_challenge 2>/dev/null)"
 
+        if [ -z "$new_response" ]; then
+            echo "Warning: Unable to generate new challenge response, current challenge persists!"
+            umount /crypt-storage
+            return
+        fi
+
         if [ ! -z "$k_user" ]; then
             new_k_luks="$(echo -n $k_user | pbkdf2-sha512 ${toString dev.yubikey.keyLength} $new_iterations $new_response | rbtohex)"
         else
diff --git a/nixos/modules/system/boot/networkd.nix b/nixos/modules/system/boot/networkd.nix
index 6d0afcc57fcc..238c6670ea0f 100644
--- a/nixos/modules/system/boot/networkd.nix
+++ b/nixos/modules/system/boot/networkd.nix
@@ -902,6 +902,9 @@ let
           "RelayTarget"
           "RelayAgentCircuitId"
           "RelayAgentRemoteId"
+          "BootServerAddress"
+          "BootServerName"
+          "BootFilename"
         ])
         (assertInt "PoolOffset")
         (assertMinimum "PoolOffset" 0)
@@ -2812,9 +2815,15 @@ let
 
       environment.etc."systemd/networkd.conf" = renderConfig cfg.config;
 
-      systemd.services.systemd-networkd = {
+      systemd.services.systemd-networkd = let
+        isReloadableUnitFileName = unitFileName: strings.hasSuffix ".network" unitFileName;
+        reloadableUnitFiles = attrsets.filterAttrs (k: v: isReloadableUnitFileName k) unitFiles;
+        nonReloadableUnitFiles = attrsets.filterAttrs (k: v: !isReloadableUnitFileName k) unitFiles;
+        unitFileSources = unitFiles: map (x: x.source) (attrValues unitFiles);
+      in {
         wantedBy = [ "multi-user.target" ];
-        restartTriggers = map (x: x.source) (attrValues unitFiles) ++ [
+        reloadTriggers = unitFileSources reloadableUnitFiles;
+        restartTriggers = unitFileSources nonReloadableUnitFiles ++ [
           config.environment.etc."systemd/networkd.conf".source
         ];
         aliases = [ "dbus-org.freedesktop.network1.service" ];
diff --git a/nixos/modules/system/boot/resolved.nix b/nixos/modules/system/boot/resolved.nix
index 4e7201833db6..b898a6317962 100644
--- a/nixos/modules/system/boot/resolved.nix
+++ b/nixos/modules/system/boot/resolved.nix
@@ -66,7 +66,7 @@ in
     };
 
     services.resolved.dnssec = mkOption {
-      default = "allow-downgrade";
+      default = "false";
       example = "true";
       type = types.enum [ "true" "allow-downgrade" "false" ];
       description = lib.mdDoc ''
@@ -85,6 +85,12 @@ in
             synthesizing a DNS response that suggests DNSSEC was not
             supported.
         - `"false"`: DNS lookups are not DNSSEC validated.
+
+        As of September 2023, systemd upstream advises disabling
+        DNSSEC by default, as the current code is not robust enough
+        to deal with non-compliant servers in the wild, which will
+        usually give you a broken and bad experience in addition to
+        an insecure one.
       '';
     };
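Users who want validation despite the new default can set it back explicitly, e.g.:

    services.resolved.dnssec = "allow-downgrade";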
 
diff --git a/nixos/modules/system/boot/stage-1.nix b/nixos/modules/system/boot/stage-1.nix
index 7aaa3f85bfe0..1cf58dbe9f1f 100644
--- a/nixos/modules/system/boot/stage-1.nix
+++ b/nixos/modules/system/boot/stage-1.nix
@@ -123,7 +123,7 @@ let
         # ZFS properties such as `setuid=off` and `exec=off` (unless manually
         # duplicated in `fileSystems.*.options`, defeating "zfsutil"'s purpose).
         copy_bin_and_libs ${lib.getOutput "mount" pkgs.util-linux}/bin/mount
-        copy_bin_and_libs ${pkgs.zfs}/bin/mount.zfs
+        copy_bin_and_libs ${config.boot.zfs.package}/bin/mount.zfs
       ''}
 
       # Copy some util-linux stuff.
@@ -610,6 +610,13 @@ in
             path the secret should have inside the initrd, the value
             is the path it should be copied from (or null for the same
             path inside and out).
+
+            The loader `generic-extlinux-compatible` supports this. Because
+            it is not well known how different implementations react to
+            concatenated cpio archives, this is disabled by default. It can be
+            enabled by setting {option}`boot.loader.supportsInitrdSecrets`
+            to true. If this works for you, please report your findings at
+            https://github.com/NixOS/nixpkgs/issues/247145 .
           '';
         example = literalExpression
           ''
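A sketch of opting in on an extlinux-based board (the secret paths are placeholders):

    boot.loader.generic-extlinux-compatible.enable = true;
    boot.loader.supportsInitrdSecrets = true;
    boot.initrd.secrets."/etc/secret.key" = "/persist/secret.key";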
diff --git a/nixos/modules/system/boot/stage-2-init.sh b/nixos/modules/system/boot/stage-2-init.sh
index f9a2084ea9e8..5a2133f960e2 100755
--- a/nixos/modules/system/boot/stage-2-init.sh
+++ b/nixos/modules/system/boot/stage-2-init.sh
@@ -104,7 +104,10 @@ fi
 
 
 # Required by the activation script
-install -m 0755 -d /etc /etc/nixos
+install -m 0755 -d /etc
+if [ ! -h "/etc/nixos" ]; then
+    install -m 0755 -d /etc/nixos
+fi
 install -m 01777 -d /tmp
 
 
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index ac55461107fb..b6c3085c4f16 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -395,7 +395,9 @@ in
       description = lib.mdDoc ''
         The amount of time which can elapse after a reboot has been triggered
         before a watchdog hardware device will automatically reboot the system.
-        Valid time units include "ms", "s", "min", "h", "d", and "w".
+        Valid time units include "ms", "s", "min", "h", "d", and "w". If left
+        `null`, systemd will use its default of `10min`; see also {command}`man
+        5 systemd-system.conf`.
       '';
     };
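Assuming the enclosing option is `systemd.watchdog.rebootTime`, a sketch of overriding the default:

    systemd.watchdog.rebootTime = "30s";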
 
diff --git a/nixos/modules/system/boot/systemd/initrd.nix b/nixos/modules/system/boot/systemd/initrd.nix
index 3f40a5b2dfa0..5d9fca7a605e 100644
--- a/nixos/modules/system/boot/systemd/initrd.nix
+++ b/nixos/modules/system/boot/systemd/initrd.nix
@@ -333,6 +333,14 @@ in {
       visible = "shallow";
       description = lib.mdDoc "Definition of slice configurations.";
     };
+
+    enableTpm2 = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable TPM2 support in the initrd.
+      '';
+    };
   };
 
   config = mkIf (config.boot.initrd.enable && cfg.enable) {
@@ -342,8 +350,8 @@ in {
       # systemd needs this for some features
       "autofs4"
       # systemd-cryptenroll
-      "tpm-tis"
-    ] ++ lib.optional (pkgs.stdenv.hostPlatform.system != "riscv64-linux") "tpm-crb";
+    ] ++ lib.optional cfg.enableTpm2 "tpm-tis"
+    ++ lib.optional (cfg.enableTpm2 && !(pkgs.stdenv.hostPlatform.isRiscV64 || pkgs.stdenv.hostPlatform.isArmv7)) "tpm-crb";
 
     boot.initrd.systemd = {
       initrdBin = [pkgs.bash pkgs.coreutils cfg.package.kmod cfg.package] ++ config.system.fsPackages;
@@ -421,11 +429,11 @@ in {
 
         # so NSS can look up usernames
         "${pkgs.glibc}/lib/libnss_files.so.2"
-      ] ++ optionals cfg.package.withCryptsetup [
+      ] ++ optionals (cfg.package.withCryptsetup && cfg.enableTpm2) [
         # tpm2 support
         "${cfg.package}/lib/cryptsetup/libcryptsetup-token-systemd-tpm2.so"
         pkgs.tpm2-tss
-
+      ] ++ optionals cfg.package.withCryptsetup [
         # fido2 support
         "${cfg.package}/lib/cryptsetup/libcryptsetup-token-systemd-fido2.so"
         "${pkgs.libfido2}/lib/libfido2.so.1"
diff --git a/nixos/modules/system/boot/systemd/user.nix b/nixos/modules/system/boot/systemd/user.nix
index 1b6398d2f929..64dc19633eca 100644
--- a/nixos/modules/system/boot/systemd/user.nix
+++ b/nixos/modules/system/boot/systemd/user.nix
@@ -230,5 +230,9 @@ in {
           });
         })
         cfg.tmpfiles.users;
+
+    system.userActivationScripts.tmpfiles = ''
+      ${config.systemd.package}/bin/systemd-tmpfiles --user --create --remove
+    '';
   };
 }
diff --git a/nixos/modules/tasks/bcache.nix b/nixos/modules/tasks/bcache.nix
index 35b922dc8a1d..68531a4d2fed 100644
--- a/nixos/modules/tasks/bcache.nix
+++ b/nixos/modules/tasks/bcache.nix
@@ -1,6 +1,10 @@
-{ config, lib, pkgs, ... }:
-
-{
+{ config, lib, pkgs, ... }: let
+  cfg = config.boot.bcache;
+in {
+  options.boot.bcache.enable = lib.mkEnableOption (lib.mdDoc "bcache mount support") // {
+    default = true;
+    example = false;
+  };
   options.boot.initrd.services.bcache.enable = lib.mkEnableOption (lib.mdDoc "bcache support in the initrd") // {
     description = lib.mdDoc ''
       *This will only be used when systemd is used in stage 1.*
@@ -9,7 +13,7 @@
     '';
   };
 
-  config = {
+  config = lib.mkIf cfg.enable {
 
     environment.systemPackages = [ pkgs.bcache-tools ];
 
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
index 21d604bee6e3..5cf863c87f27 100644
--- a/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -662,6 +662,11 @@ in
         ];
       };
 
+      # ZFS already has its own scheduler. Without this, my (@Artturin) computer froze for a second whenever I ran a nix build.
+      services.udev.extraRules = ''
+        ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*|mmcblk[0-9]*p[0-9]*|nvme[0-9]*n[0-9]*p[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/scheduler}="none"
+      '';
+
       environment.etc = genAttrs
         (map
           (file: "zfs/zed.d/${file}")
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index eb1c7512d920..0d4033ca9430 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -1396,14 +1396,12 @@ in
     security.apparmor.policies."bin.ping".profile = lib.mkIf config.security.apparmor.policies."bin.ping".enable (lib.mkAfter ''
       /run/wrappers/bin/ping {
         include <abstractions/base>
-        include <nixos/security.wrappers>
+        include <nixos/security.wrappers/ping>
         rpx /run/wrappers/wrappers.*/ping,
       }
       /run/wrappers/wrappers.*/ping {
         include <abstractions/base>
-        include <nixos/security.wrappers>
-        r /run/wrappers/wrappers.*/ping.real,
-        mrpx ${config.security.wrappers.ping.source},
+        include <nixos/security.wrappers/ping>
         capability net_raw,
         capability setpcap,
       }
diff --git a/nixos/modules/tasks/swraid.nix b/nixos/modules/tasks/swraid.nix
index f624294565b0..61b3682e0f68 100644
--- a/nixos/modules/tasks/swraid.nix
+++ b/nixos/modules/tasks/swraid.nix
@@ -2,6 +2,13 @@
 
   cfg = config.boot.swraid;
 
+  mdadm_conf = config.environment.etc."mdadm.conf";
+
+  enable_implicitly_for_old_state_versions = lib.versionOlder config.system.stateVersion "23.11";
+
+  minimum_config_is_set = mdadm_conf:
+    (builtins.match ".*(MAILADDR|PROGRAM).*" mdadm_conf.text) != null;
+
 in {
   imports = [
     (lib.mkRenamedOptionModule [ "boot" "initrd" "services" "swraid" "enable" ] [ "boot" "swraid" "enable" ])
@@ -24,7 +31,7 @@ in {
         should detect it correctly in the standard installation
         procedure.
       '';
-      default = lib.versionOlder config.system.stateVersion "23.11";
+      default = enable_implicitly_for_old_state_versions;
       defaultText = lib.mdDoc "`true` if stateVersion is older than 23.11";
     };
 
@@ -36,8 +43,14 @@ in {
   };
 
   config = lib.mkIf cfg.enable {
+    warnings = lib.mkIf
+        ( !enable_implicitly_for_old_state_versions && !minimum_config_is_set mdadm_conf)
+        [ "mdadm: Neither MAILADDR nor PROGRAM has been set. This will cause the `mdmon` service to crash." ];
+
     environment.systemPackages = [ pkgs.mdadm ];
 
+    environment.etc."mdadm.conf".text = lib.mkAfter cfg.mdadmConf;
+
     services.udev.packages = [ pkgs.mdadm ];
 
     systemd.packages = [ pkgs.mdadm ];
@@ -59,12 +72,10 @@ in {
         $out/bin/mdadm --version
       '';
 
-      extraFiles."/etc/mdadm.conf".source = pkgs.writeText "mdadm.conf" config.boot.swraid.mdadmConf;
+      extraFiles."/etc/mdadm.conf".source = pkgs.writeText "mdadm.conf" mdadm_conf.text;
 
       systemd = {
-        contents."/etc/mdadm.conf" = lib.mkIf (cfg.mdadmConf != "") {
-          text = cfg.mdadmConf;
-        };
+        contents."/etc/mdadm.conf".text = mdadm_conf.text;
 
         packages = [ pkgs.mdadm ];
         initrdBin = [ pkgs.mdadm ];
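To satisfy the new warning, a minimal mdadm configuration fragment is enough, e.g.:

    boot.swraid.mdadmConf = ''
      MAILADDR root
    '';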
diff --git a/nixos/modules/virtualisation/anbox.nix b/nixos/modules/virtualisation/anbox.nix
index c7e9e23c4c92..523d9a9576ef 100644
--- a/nixos/modules/virtualisation/anbox.nix
+++ b/nixos/modules/virtualisation/anbox.nix
@@ -5,7 +5,7 @@ with lib;
 let
 
   cfg = config.virtualisation.anbox;
-  kernelPackages = config.boot.kernelPackages;
+
   addrOpts = v: addr: pref: name: {
     address = mkOption {
       default = addr;
@@ -25,6 +25,28 @@ let
     };
   };
 
+  finalImage = if cfg.imageModifications == "" then cfg.image else ( pkgs.callPackage (
+    { runCommandNoCC, squashfsTools }:
+
+    runCommandNoCC "${cfg.image.name}-modified.img" {
+      nativeBuildInputs = [
+        squashfsTools
+      ];
+    } ''
+      echo "-> Extracting Anbox root image..."
+      unsquashfs -dest rootfs ${cfg.image}
+
+      echo "-> Modifying Anbox root image..."
+      (
+      cd rootfs
+      ${cfg.imageModifications}
+      )
+
+      echo "-> Packing modified Anbox root image..."
+      mksquashfs rootfs $out -comp xz -no-xattrs -all-root
+    ''
+  ) { });
+
 in
 
 {
@@ -42,6 +64,18 @@ in
       '';
     };
 
+    imageModifications = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Commands to edit the image filesystem.
+
+        This can be used, for example, to bundle a privileged F-Droid.
+
+        Commands are run with the working directory set to the root of the filesystem.
+      '';
+    };
+
     extraInit = mkOption {
       type = types.lines;
       default = "";
@@ -67,16 +101,19 @@ in
   config = mkIf cfg.enable {
 
     assertions = singleton {
-      assertion = versionAtLeast (getVersion config.boot.kernelPackages.kernel) "4.18";
-      message = "Anbox needs user namespace support to work properly";
+      assertion = with config.boot.kernelPackages; kernelAtLeast "5.5" && kernelOlder "5.18";
+      message = "Anbox needs a kernel with binder and ashmem support";
     };
 
     environment.systemPackages = with pkgs; [ anbox ];
 
-    services.udev.extraRules = ''
-      KERNEL=="ashmem", NAME="%k", MODE="0666"
-      KERNEL=="binder*", NAME="%k", MODE="0666"
-    '';
+    systemd.mounts = singleton {
+      requiredBy = [ "anbox-container-manager.service" ];
+      description = "Anbox Binder File System";
+      what = "binder";
+      where = "/dev/binderfs";
+      type = "binder";
+    };
 
     virtualisation.lxc.enable = true;
     networking.bridges.anbox0.interfaces = [];
@@ -87,6 +124,9 @@ in
       internalInterfaces = [ "anbox0" ];
     };
 
+    # Ensures NetworkManager doesn't touch anbox0
+    networking.networkmanager.unmanaged = [ "anbox0" ];
+
     systemd.services.anbox-container-manager = let
       anboxloc = "/var/lib/anbox";
     in {
@@ -121,12 +161,13 @@ in
         ExecStart = ''
           ${pkgs.anbox}/bin/anbox container-manager \
             --data-path=${anboxloc} \
-            --android-image=${cfg.image} \
+            --android-image=${finalImage} \
             --container-network-address=${cfg.ipv4.container.address} \
             --container-network-gateway=${cfg.ipv4.gateway.address} \
             --container-network-dns-servers=${cfg.ipv4.dns} \
             --use-rootfs-overlay \
-            --privileged
+            --privileged \
+            --daemon
         '';
       };
     };
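A sketch of the new `imageModifications` hook (the commands are placeholders):

    virtualisation.anbox.imageModifications = ''
      mkdir -p system/priv-app
      # e.g. unpack a privileged F-Droid apk into the image here
    '';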
diff --git a/nixos/modules/virtualisation/google-compute-config.nix b/nixos/modules/virtualisation/google-compute-config.nix
index cf94ce0faf36..3c503f027d79 100644
--- a/nixos/modules/virtualisation/google-compute-config.nix
+++ b/nixos/modules/virtualisation/google-compute-config.nix
@@ -39,7 +39,7 @@ in
   # Allow root logins only using SSH keys
   # and disable password authentication in general
   services.openssh.enable = true;
-  services.openssh.settings.PermitRootLogin = "prohibit-password";
+  services.openssh.settings.PermitRootLogin = mkDefault "prohibit-password";
   services.openssh.settings.PasswordAuthentication = mkDefault false;
 
   # enable OS Login. This also requires setting enable-oslogin=TRUE metadata on
diff --git a/nixos/modules/virtualisation/lxc-container.nix b/nixos/modules/virtualisation/lxc-container.nix
index 55b285b69147..9402d3bf37d0 100644
--- a/nixos/modules/virtualisation/lxc-container.nix
+++ b/nixos/modules/virtualisation/lxc-container.nix
@@ -1,96 +1,16 @@
 { lib, config, pkgs, ... }:
 
-with lib;
-
 let
-  templateSubmodule = { ... }: {
-    options = {
-      enable = mkEnableOption (lib.mdDoc "this template");
-
-      target = mkOption {
-        description = lib.mdDoc "Path in the container";
-        type = types.path;
-      };
-      template = mkOption {
-        description = lib.mdDoc ".tpl file for rendering the target";
-        type = types.path;
-      };
-      when = mkOption {
-        description = lib.mdDoc "Events which trigger a rewrite (create, copy)";
-        type = types.listOf (types.str);
-      };
-      properties = mkOption {
-        description = lib.mdDoc "Additional properties";
-        type = types.attrs;
-        default = {};
-      };
-    };
-  };
-
-  toYAML = name: data: pkgs.writeText name (generators.toYAML {} data);
-
   cfg = config.virtualisation.lxc;
-  templates = if cfg.templates != {} then let
-    list = mapAttrsToList (name: value: { inherit name; } // value)
-      (filterAttrs (name: value: value.enable) cfg.templates);
-  in
-    {
-      files = map (tpl: {
-        source = tpl.template;
-        target = "/templates/${tpl.name}.tpl";
-      }) list;
-      properties = listToAttrs (map (tpl: nameValuePair tpl.target {
-        when = tpl.when;
-        template = "${tpl.name}.tpl";
-        properties = tpl.properties;
-      }) list);
-    }
-  else { files = []; properties = {}; };
-
-in
-{
+in {
   imports = [
-    ../installer/cd-dvd/channel.nix
-    ../profiles/clone-config.nix
-    ../profiles/minimal.nix
+    ./lxc-instance-common.nix
   ];
 
   options = {
     virtualisation.lxc = {
-      templates = mkOption {
-        description = lib.mdDoc "Templates for LXD";
-        type = types.attrsOf (types.submodule (templateSubmodule));
-        default = {};
-        example = literalExpression ''
-          {
-            # create /etc/hostname on container creation. also requires networking.hostName = "" to be set
-            "hostname" = {
-              enable = true;
-              target = "/etc/hostname";
-              template = builtins.toFile "hostname.tpl" "{{ container.name }}";
-              when = [ "create" ];
-            };
-            # create /etc/nixos/hostname.nix with a configuration for keeping the hostname applied
-            "hostname-nix" = {
-              enable = true;
-              target = "/etc/nixos/hostname.nix";
-              template = builtins.toFile "hostname-nix.tpl" "{ ... }: { networking.hostName = \"{{ container.name }}\"; }";
-              # copy keeps the file updated when the container is changed
-              when = [ "create" "copy" ];
-            };
-            # copy allow the user to specify a custom configuration.nix
-            "configuration-nix" = {
-              enable = true;
-              target = "/etc/nixos/configuration.nix";
-              template = builtins.toFile "configuration-nix" "{{ config_get(\"user.user-data\", properties.default) }}";
-              when = [ "create" ];
-            };
-          };
-        '';
-      };
-
-      privilegedContainer = mkOption {
-        type = types.bool;
+      privilegedContainer = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           Whether this LXC container will be running as a privileged container or not. If set to `true` then
@@ -116,24 +36,6 @@ in
         ${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
       '';
 
-    system.build.metadata = pkgs.callPackage ../../lib/make-system-tarball.nix {
-      contents = [
-        {
-          source = toYAML "metadata.yaml" {
-            architecture = builtins.elemAt (builtins.match "^([a-z0-9_]+).+" (toString pkgs.system)) 0;
-            creation_date = 1;
-            properties = {
-              description = "${config.system.nixos.distroName} ${config.system.nixos.codeName} ${config.system.nixos.label} ${pkgs.system}";
-              os = "${config.system.nixos.distroId}";
-              release = "${config.system.nixos.codeName}";
-            };
-            templates = templates.properties;
-          };
-          target = "/metadata.yaml";
-        }
-      ] ++ templates.files;
-    };
-
     # TODO: build rootfs as squashfs for faster unpack
     system.build.tarball = pkgs.callPackage ../../lib/make-system-tarball.nix {
       extraArgs = "--owner=0";
@@ -180,7 +82,7 @@ in
           ProtectKernelTunables=no
           NoNewPrivileges=no
           LoadCredential=
-        '' + optionalString cfg.privilegedContainer ''
+        '' + lib.optionalString cfg.privilegedContainer ''
           # Additional settings for privileged containers
           ProtectHome=no
           ProtectSystem=no
@@ -193,28 +95,8 @@ in
       })
     ];
 
-    # Allow the user to login as root without password.
-    users.users.root.initialHashedPassword = mkOverride 150 "";
-
-    system.activationScripts.installInitScript = mkForce ''
+    system.activationScripts.installInitScript = lib.mkForce ''
       ln -fs $systemConfig/init /sbin/init
     '';
-
-    # Some more help text.
-    services.getty.helpLine =
-      ''
-
-        Log in as "root" with an empty password.
-      '';
-
-    # Containers should be light-weight, so start sshd on demand.
-    services.openssh.enable = mkDefault true;
-    services.openssh.startWhenNeeded = mkDefault true;
-
-    # As this is intended as a standalone image, undo some of the minimal profile stuff
-    environment.noXlibs = false;
-    documentation.enable = true;
-    documentation.nixos.enable = true;
-    services.logrotate.enable = true;
   };
 }
diff --git a/nixos/modules/virtualisation/lxc-image-metadata.nix b/nixos/modules/virtualisation/lxc-image-metadata.nix
new file mode 100644
index 000000000000..2c0568b4c468
--- /dev/null
+++ b/nixos/modules/virtualisation/lxc-image-metadata.nix
@@ -0,0 +1,104 @@
+{ lib, config, pkgs, ... }:
+
+let
+  templateSubmodule = {...}: {
+    options = {
+      enable = lib.mkEnableOption "this template";
+
+      target = lib.mkOption {
+        description = "Path in the container";
+        type = lib.types.path;
+      };
+      template = lib.mkOption {
+        description = ".tpl file for rendering the target";
+        type = lib.types.path;
+      };
+      when = lib.mkOption {
+        description = "Events which trigger a rewrite (create, copy)";
+        type = lib.types.listOf (lib.types.str);
+      };
+      properties = lib.mkOption {
+        description = "Additional properties";
+        type = lib.types.attrs;
+        default = {};
+      };
+    };
+  };
+
+  toYAML = name: data: pkgs.writeText name (lib.generators.toYAML {} data);
+
+  cfg = config.virtualisation.lxc;
+  templates = if cfg.templates != {} then let
+    list = lib.mapAttrsToList (name: value: { inherit name; } // value)
+      (lib.filterAttrs (name: value: value.enable) cfg.templates);
+  in
+    {
+      files = map (tpl: {
+        source = tpl.template;
+        target = "/templates/${tpl.name}.tpl";
+      }) list;
+      properties = lib.listToAttrs (map (tpl: lib.nameValuePair tpl.target {
+        when = tpl.when;
+        template = "${tpl.name}.tpl";
+        properties = tpl.properties;
+      }) list);
+    }
+  else { files = []; properties = {}; };
+
+in {
+  options = {
+    virtualisation.lxc = {
+      templates = lib.mkOption {
+        description = "Templates for LXD";
+        type = lib.types.attrsOf (lib.types.submodule templateSubmodule);
+        default = {};
+        example = lib.literalExpression ''
+          {
+            # create /etc/hostname on container creation
+            "hostname" = {
+              enable = true;
+              target = "/etc/hostname";
+              template = builtins.writeFile "hostname.tpl" "{{ container.name }}";
+              when = [ "create" ];
+            };
+            # create /etc/nixos/hostname.nix with a configuration for keeping the hostname applied
+            "hostname-nix" = {
+              enable = true;
+              target = "/etc/nixos/hostname.nix";
+              template = builtins.writeFile "hostname-nix.tpl" "{ ... }: { networking.hostName = "{{ container.name }}"; }";
+              # copy keeps the file updated when the container is changed
+              when = [ "create" "copy" ];
+            };
+            # copy allows the user to specify a custom configuration.nix
+            "configuration-nix" = {
+              enable = true;
+              target = "/etc/nixos/configuration.nix";
+              template = builtins.writeFile "configuration-nix" "{{ config_get(\"user.user-data\", properties.default) }}";
+              when = [ "create" ];
+            };
+          };
+        '';
+      };
+    };
+  };
+
+  config = {
+    system.build.metadata = pkgs.callPackage ../../lib/make-system-tarball.nix {
+      contents = [
+        {
+          source = toYAML "metadata.yaml" {
+            architecture = builtins.elemAt (builtins.match "^([a-z0-9_]+).+" (toString pkgs.system)) 0;
+            creation_date = 1;
+            properties = {
+              description = "${config.system.nixos.distroName} ${config.system.nixos.codeName} ${config.system.nixos.label} ${pkgs.system}";
+              os = "${config.system.nixos.distroId}";
+              release = "${config.system.nixos.codeName}";
+            };
+            templates = templates.properties;
+          };
+          target = "/metadata.yaml";
+        }
+      ] ++ templates.files;
+    };
+  };
+}
diff --git a/nixos/modules/virtualisation/lxc-instance-common.nix b/nixos/modules/virtualisation/lxc-instance-common.nix
new file mode 100644
index 000000000000..d6a0e05fb1c9
--- /dev/null
+++ b/nixos/modules/virtualisation/lxc-instance-common.nix
@@ -0,0 +1,30 @@
+{lib, ...}:
+
+{
+  imports = [
+    ./lxc-image-metadata.nix
+
+    ../installer/cd-dvd/channel.nix
+    ../profiles/clone-config.nix
+    ../profiles/minimal.nix
+  ];
+
+  # Allow the user to login as root without password.
+  users.users.root.initialHashedPassword = lib.mkOverride 150 "";
+
+  # Some more help text.
+  services.getty.helpLine = ''
+
+    Log in as "root" with an empty password.
+  '';
+
+  # Containers should be light-weight, so start sshd on demand.
+  services.openssh.enable = lib.mkDefault true;
+  services.openssh.startWhenNeeded = lib.mkDefault true;
+
+  # As this is intended as a standalone image, undo some of the minimal profile stuff
+  environment.noXlibs = false;
+  documentation.enable = true;
+  documentation.nixos.enable = true;
+  services.logrotate.enable = true;
+}
diff --git a/nixos/modules/virtualisation/lxd-virtual-machine.nix b/nixos/modules/virtualisation/lxd-virtual-machine.nix
new file mode 100644
index 000000000000..ba729465ec2f
--- /dev/null
+++ b/nixos/modules/virtualisation/lxd-virtual-machine.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+
+let
+  serialDevice =
+    if pkgs.stdenv.hostPlatform.isx86
+    then "ttyS0"
+    else "ttyAMA0"; # aarch64
+in {
+  imports = [
+    ./lxc-instance-common.nix
+
+    ../profiles/qemu-guest.nix
+  ];
+
+  config = {
+    system.build.qemuImage = import ../../lib/make-disk-image.nix {
+      inherit pkgs lib config;
+
+      partitionTableType = "efi";
+      format = "qcow2-compressed";
+      copyChannel = true;
+    };
+
+    fileSystems = {
+      "/" = {
+        device = "/dev/disk/by-label/nixos";
+        autoResize = true;
+        fsType = "ext4";
+      };
+      "/boot" = {
+        device = "/dev/disk/by-label/ESP";
+        fsType = "vfat";
+      };
+    };
+
+    boot.growPartition = true;
+    boot.loader.systemd-boot.enable = true;
+
+    # image building needs to know what device to install bootloader on
+    boot.loader.grub.device = "/dev/vda";
+
+    boot.kernelParams = ["console=tty1" "console=${serialDevice}"];
+
+    virtualisation.lxd.agent.enable = lib.mkDefault true;
+  };
+}
diff --git a/nixos/modules/virtualisation/lxd.nix b/nixos/modules/virtualisation/lxd.nix
index e22ba9a0ae2c..e30fbebb662c 100644
--- a/nixos/modules/virtualisation/lxd.nix
+++ b/nixos/modules/virtualisation/lxd.nix
@@ -2,21 +2,20 @@
 
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.virtualisation.lxd;
+  preseedFormat = pkgs.formats.yaml {};
 in {
   imports = [
-    (mkRemovedOptionModule [ "virtualisation" "lxd" "zfsPackage" ] "Override zfs in an overlay instead to override it globally")
+    (lib.mkRemovedOptionModule [ "virtualisation" "lxd" "zfsPackage" ] "Override zfs in an overlay instead to override it globally")
   ];
 
   ###### interface
 
   options = {
     virtualisation.lxd = {
-      enable = mkOption {
-        type = types.bool;
+      enable = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           This option enables lxd, a daemon that manages
@@ -32,28 +31,28 @@ in {
         '';
       };
 
-      package = mkOption {
-        type = types.package;
+      package = lib.mkOption {
+        type = lib.types.package;
         default = pkgs.lxd;
-        defaultText = literalExpression "pkgs.lxd";
+        defaultText = lib.literalExpression "pkgs.lxd";
         description = lib.mdDoc ''
           The LXD package to use.
         '';
       };
 
-      lxcPackage = mkOption {
-        type = types.package;
+      lxcPackage = lib.mkOption {
+        type = lib.types.package;
         default = pkgs.lxc;
-        defaultText = literalExpression "pkgs.lxc";
+        defaultText = lib.literalExpression "pkgs.lxc";
         description = lib.mdDoc ''
           The LXC package to use with LXD (required for AppArmor profiles).
         '';
       };
 
-      zfsSupport = mkOption {
-        type = types.bool;
+      zfsSupport = lib.mkOption {
+        type = lib.types.bool;
         default = config.boot.zfs.enabled;
-        defaultText = literalExpression "config.boot.zfs.enabled";
+        defaultText = lib.literalExpression "config.boot.zfs.enabled";
         description = lib.mdDoc ''
           Enables lxd to use zfs as a storage for containers.
 
@@ -62,8 +61,8 @@ in {
         '';
       };
 
-      recommendedSysctlSettings = mkOption {
-        type = types.bool;
+      recommendedSysctlSettings = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           Enables various settings to avoid common pitfalls when
@@ -75,8 +74,67 @@ in {
         '';
       };
 
-      startTimeout = mkOption {
-        type = types.int;
+      preseed = lib.mkOption {
+        type = lib.types.nullOr (lib.types.submodule {
+          freeformType = preseedFormat.type;
+        });
+
+        default = null;
+
+        description = lib.mdDoc ''
+          Configuration for LXD preseed, see
+          <https://documentation.ubuntu.com/lxd/en/latest/howto/initialize/#initialize-preseed>
+          for supported values.
+
+          Changes to this will be re-applied to LXD which will overwrite existing entities or create missing ones,
+          but entities will *not* be removed by preseed.
+        '';
+
+        example = lib.literalExpression ''
+          {
+            networks = [
+              {
+                name = "lxdbr0";
+                type = "bridge";
+                config = {
+                  "ipv4.address" = "10.0.100.1/24";
+                  "ipv4.nat" = "true";
+                };
+              }
+            ];
+            profiles = [
+              {
+                name = "default";
+                devices = {
+                  eth0 = {
+                    name = "eth0";
+                    network = "lxdbr0";
+                    type = "nic";
+                  };
+                  root = {
+                    path = "/";
+                    pool = "default";
+                    size = "35GiB";
+                    type = "disk";
+                  };
+                };
+              }
+            ];
+            storage_pools = [
+              {
+                name = "default";
+                driver = "dir";
+                config = {
+                  source = "/var/lib/lxd/storage-pools/default";
+                };
+              }
+            ];
+          }
+        '';
+      };
+
+      startTimeout = lib.mkOption {
+        type = lib.types.int;
         default = 600;
         apply = toString;
         description = lib.mdDoc ''
@@ -91,13 +149,13 @@ in {
           Enables the (experimental) LXD UI.
         '');
 
-        package = mkPackageOption pkgs.lxd-unwrapped "ui" { };
+        package = lib.mkPackageOption pkgs.lxd-unwrapped "ui" { };
       };
     };
   };
 
   ###### implementation
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     environment.systemPackages = [ cfg.package ];
 
     # Note: the following options are also declared in virtualisation.lxc, but
@@ -139,19 +197,19 @@ in {
       wantedBy = [ "multi-user.target" ];
       after = [
         "network-online.target"
-        (mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
+        (lib.mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
       ];
       requires = [
         "network-online.target"
         "lxd.socket"
-        (mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
+        (lib.mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
       ];
       documentation = [ "man:lxd(1)" ];
 
       path = [ pkgs.util-linux ]
-        ++ optional cfg.zfsSupport config.boot.zfs.package;
+        ++ lib.optional cfg.zfsSupport config.boot.zfs.package;
 
-      environment = mkIf (cfg.ui.enable) {
+      environment = lib.mkIf (cfg.ui.enable) {
         "LXD_UI" = cfg.ui.package;
       };
 
@@ -173,11 +231,26 @@ in {
         # By default, `lxd` loads configuration files from hard-coded
         # `/usr/share/lxc/config` - since this is a no-go for us, we have to
         # explicitly tell it where the actual configuration files are
-        Environment = mkIf (config.virtualisation.lxc.lxcfs.enable)
+        Environment = lib.mkIf (config.virtualisation.lxc.lxcfs.enable)
           "LXD_LXC_TEMPLATE_CONFIG=${pkgs.lxcfs}/share/lxc/config";
       };
     };
 
+    systemd.services.lxd-preseed = lib.mkIf (cfg.preseed != null) {
+      description = "LXD initialization with preseed file";
+      wantedBy = ["multi-user.target"];
+      requires = ["lxd.service"];
+      after = ["lxd.service"];
+
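+      # Feed the generated preseed YAML into `lxd init --preseed` on stdin;
+      # LXD then creates or updates the declared networks, profiles and
+      # storage pools (see the virtualisation.lxd.preseed description).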
+      script = ''
+        ${pkgs.coreutils}/bin/cat ${preseedFormat.generate "lxd-preseed.yaml" cfg.preseed} | ${cfg.package}/bin/lxd init --preseed
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+      };
+    };
+
     users.groups.lxd = {};
 
     users.users.root = {
@@ -185,7 +258,7 @@ in {
       subGidRanges = [ { startGid = 1000000; count = 65536; } ];
     };
 
-    boot.kernel.sysctl = mkIf cfg.recommendedSysctlSettings {
+    boot.kernel.sysctl = lib.mkIf cfg.recommendedSysctlSettings {
       "fs.inotify.max_queued_events" = 1048576;
       "fs.inotify.max_user_instances" = 1048576;
       "fs.inotify.max_user_watches" = 1048576;
@@ -196,7 +269,7 @@ in {
       "kernel.keys.maxkeys" = 2000;
     };
 
-    boot.kernelModules = [ "veth" "xt_comment" "xt_CHECKSUM" "xt_MASQUERADE" ]
-      ++ optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
+    boot.kernelModules = [ "veth" "xt_comment" "xt_CHECKSUM" "xt_MASQUERADE" "vhost_vsock" ]
+      ++ lib.optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
   };
 }
diff --git a/nixos/modules/virtualisation/oci-common.nix b/nixos/modules/virtualisation/oci-common.nix
new file mode 100644
index 000000000000..ac9405e3ecfa
--- /dev/null
+++ b/nixos/modules/virtualisation/oci-common.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.oci;
+in
+{
+  imports = [ ../profiles/qemu-guest.nix ];
+
+  # Taken from /proc/cmdline of Ubuntu 20.04.2 LTS on OCI
+  boot.kernelParams = [
+    "nvme.shutdown_timeout=10"
+    "nvme_core.shutdown_timeout=10"
+    "libiscsi.debug_libiscsi_eh=1"
+    "crash_kexec_post_notifiers"
+
+    # VNC console
+    "console=tty1"
+
+    # x86_64-linux
+    "console=ttyS0"
+
+    # aarch64-linux
+    "console=ttyAMA0,115200"
+  ];
+
+  boot.growPartition = true;
+
+  fileSystems."/" = {
+    device = "/dev/disk/by-label/nixos";
+    fsType = "ext4";
+    autoResize = true;
+  };
+
+  fileSystems."/boot" = lib.mkIf cfg.efi {
+    device = "/dev/disk/by-label/ESP";
+    fsType = "vfat";
+  };
+
+  boot.loader.efi.canTouchEfiVariables = false;
+  boot.loader.grub = {
+    device = if cfg.efi then "nodev" else "/dev/sda";
+    splashImage = null;
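+    # Mirror the GRUB menu onto the serial console so it can be reached
+    # through the OCI serial console connection.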
+    extraConfig = ''
+      serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
+      terminal_input --append serial
+      terminal_output --append serial
+    '';
+    efiInstallAsRemovable = cfg.efi;
+    efiSupport = cfg.efi;
+  };
+
+  # https://docs.oracle.com/en-us/iaas/Content/Compute/Tasks/configuringntpservice.htm#Configuring_the_Oracle_Cloud_Infrastructure_NTP_Service_for_an_Instance
+  networking.timeServers = [ "169.254.169.254" ];
+
+  services.openssh.enable = true;
+
+  # Otherwise the instance may not have a working network-online.target,
+  # causing fetch-ssh-keys.service to fail
+  networking.useNetworkd = true;
+}
diff --git a/nixos/modules/virtualisation/oci-config-user.nix b/nixos/modules/virtualisation/oci-config-user.nix
new file mode 100644
index 000000000000..70c0b34efe7a
--- /dev/null
+++ b/nixos/modules/virtualisation/oci-config-user.nix
@@ -0,0 +1,12 @@
+{ modulesPath, ... }:
+
+{
+  # To build the configuration or use nix-env, you need to run
+  # either nixos-rebuild --upgrade or nix-channel --update
+  # to fetch the nixos channel.
+
+  # This configures everything but bootstrap services,
+  # which only need to be run once and have already finished
+  # if you are able to see this comment.
+  imports = [ "${modulesPath}/virtualisation/oci-common.nix" ];
+}
diff --git a/nixos/modules/virtualisation/oci-image.nix b/nixos/modules/virtualisation/oci-image.nix
new file mode 100644
index 000000000000..d4af5016dd71
--- /dev/null
+++ b/nixos/modules/virtualisation/oci-image.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.oci;
+in
+{
+  imports = [ ./oci-common.nix ];
+
+  config = {
+    system.build.OCIImage = import ../../lib/make-disk-image.nix {
+      inherit config lib pkgs;
+      name = "oci-image";
+      configFile = ./oci-config-user.nix;
+      format = "qcow2";
+      diskSize = 8192;
+      partitionTableType = if cfg.efi then "efi" else "legacy";
+    };
+
+    systemd.services.fetch-ssh-keys = {
+      description = "Fetch authorized_keys for root user";
+
+      wantedBy = [ "sshd.service" ];
+      before = [ "sshd.service" ];
+
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+
+      path  = [ pkgs.coreutils pkgs.curl ];
+      script = ''
+        mkdir -m 0700 -p /root/.ssh
+        if [ -f /root/.ssh/authorized_keys ]; then
+          echo "Authorized keys have already been downloaded"
+        else
+          echo "Downloading authorized keys from Instance Metadata Service v2"
+          curl -s -S -L \
+            -H "Authorization: Bearer Oracle" \
+            -o /root/.ssh/authorized_keys \
+            http://169.254.169.254/opc/v2/instance/metadata/ssh_authorized_keys
+          chmod 600 /root/.ssh/authorized_keys
+        fi
+      '';
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        StandardError = "journal+console";
+        StandardOutput = "journal+console";
+      };
+    };
+  };
+}
diff --git a/nixos/modules/virtualisation/oci-options.nix b/nixos/modules/virtualisation/oci-options.nix
new file mode 100644
index 000000000000..0dfedc6a530c
--- /dev/null
+++ b/nixos/modules/virtualisation/oci-options.nix
@@ -0,0 +1,14 @@
+{ config, lib, pkgs, ... }:
+{
+  options = {
+    oci = {
+      efi = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        internal = true;
+        description = ''
+          Whether the OCI instance is using EFI.
+        '';
+      };
+    };
+  };
+}
diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index d0a5ddd87ccf..74c3e1ecd03f 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -647,7 +647,7 @@ in
         import pkgs.path { system = "x86_64-darwin"; }
       '';
       description = lib.mdDoc ''
-        pkgs set to use for the host-specific packages of the vm runner.
+        Package set to use for the host-specific packages of the VM runner.
         Changing this to e.g. a Darwin package set allows running NixOS VMs on Darwin.
       '';
     };
diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix
index 125086294d41..29dcdab7d18e 100644
--- a/nixos/release-combined.nix
+++ b/nixos/release-combined.nix
@@ -158,6 +158,11 @@ in rec {
         (onFullSupported "nixpkgs.emacs")
         (onFullSupported "nixpkgs.jdk")
         ["nixpkgs.tarball"]
+
+        # Ensure that nixpkgs-check-by-name is available in all release channels and nixos-unstable,
+        # so that a pre-built version can be used in CI for PRs on the corresponding development branches.
+        # See ../pkgs/test/nixpkgs-check-by-name/README.md
+        (onSystems ["x86_64-linux"] "nixpkgs.tests.nixpkgs-check-by-name")
       ];
     };
 }
diff --git a/nixos/release.nix b/nixos/release.nix
index 6da6faab73be..abaa7ef9a711 100644
--- a/nixos/release.nix
+++ b/nixos/release.nix
@@ -44,10 +44,13 @@ let
   pkgs = import ./.. { system = "x86_64-linux"; };
 
 
-  versionModule =
-    { system.nixos.versionSuffix = versionSuffix;
-      system.nixos.revision = nixpkgs.rev or nixpkgs.shortRev;
-    };
+  versionModule = { config, ... }: {
+    system.nixos.versionSuffix = versionSuffix;
+    system.nixos.revision = nixpkgs.rev or nixpkgs.shortRev;
+
+    # At creation time we do not have state yet, so just default to latest.
+    system.stateVersion = config.system.nixos.version;
+  };
 
   makeModules = module: rest: [ configuration versionModule module rest ];
 
@@ -310,7 +313,7 @@ in rec {
   );
 
   # An image that can be imported into lxd and used for container creation
-  lxdImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+  lxdContainerImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
 
     with import ./.. { inherit system; };
 
@@ -319,14 +322,46 @@ in rec {
       modules =
         [ configuration
           versionModule
-          ./maintainers/scripts/lxd/lxd-image.nix
+          ./maintainers/scripts/lxd/lxd-container-image.nix
         ];
     }).config.system.build.tarball)
 
   );
 
   # Metadata for the lxd image
-  lxdMeta = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+  lxdContainerMeta = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/lxd/lxd-container-image.nix
+        ];
+    }).config.system.build.metadata)
+
+  );
+
+  # An image that can be imported into lxd and used for virtual machine creation
+  lxdVirtualMachineImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/lxd/lxd-virtual-machine-image.nix
+        ];
+    }).config.system.build.qemuImage)
+
+  );
+
+  # Metadata for the lxd virtual machine image
+  lxdVirtualMachineImageMeta = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
 
     with import ./.. { inherit system; };
 
@@ -335,7 +370,7 @@ in rec {
       modules =
         [ configuration
           versionModule
-          ./maintainers/scripts/lxd/lxd-image.nix
+          ./maintainers/scripts/lxd/lxd-virtual-machine-image.nix
         ];
     }).config.system.build.metadata)
 
diff --git a/nixos/tests/acme.nix b/nixos/tests/acme.nix
index 4d220b9747aa..e5f2d4c7934a 100644
--- a/nixos/tests/acme.nix
+++ b/nixos/tests/acme.nix
@@ -18,7 +18,7 @@
   dnsConfig = nodes: {
     dnsProvider = "exec";
     dnsPropagationCheck = false;
-    credentialsFile = pkgs.writeText "wildcard.env" ''
+    environmentFile = pkgs.writeText "wildcard.env" ''
       EXEC_PATH=${dnsScript nodes}
       EXEC_POLLING_INTERVAL=1
       EXEC_PROPAGATION_TIMEOUT=1
@@ -266,6 +266,37 @@ in {
           }
         ];
 
+        concurrency-limit.configuration = {pkgs, ...}: lib.mkMerge [
+          webserverBasicConfig {
+            security.acme.maxConcurrentRenewals = 1;
+
+            services.nginx.virtualHosts = {
+              "f.example.test" = vhostBase // {
+                enableACME = true;
+              };
+              "g.example.test" = vhostBase // {
+                enableACME = true;
+              };
+              "h.example.test" = vhostBase // {
+                enableACME = true;
+              };
+            };
+
+            systemd.services = {
+              # check for mutual exclusion of starting renew services
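+              # every renewal unit asserts that none of its siblings is
+              # currently activating before it is allowed to start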
+              "acme-f.example.test".serviceConfig.ExecPreStart = "+" + (pkgs.writeShellScript "test-f" ''
+                test "$(systemctl is-active acme-{g,h}.example.test.service | grep activating | wc -l)" -le 0
+                '');
+              "acme-g.example.test".serviceConfig.ExecPreStart = "+" + (pkgs.writeShellScript "test-g" ''
+                test "$(systemctl is-active acme-{f,h}.example.test.service | grep activating | wc -l)" -le 0
+                '');
+              "acme-h.example.test".serviceConfig.ExecPreStart = "+" + (pkgs.writeShellScript "test-h" ''
+                test "$(systemctl is-active acme-{g,f}.example.test.service | grep activating | wc -l)" -le 0
+                '');
+              };
+          }
+        ];
+
         # Test lego internal server (listenHTTP option)
         # Also tests useRoot option
         lego-server.configuration = { ... }: {
@@ -297,7 +328,7 @@ in {
 
           services.caddy = {
             enable = true;
-            virtualHosts."a.exmaple.test" = {
+            virtualHosts."a.example.test" = {
               useACMEHost = "example.test";
               extraConfig = ''
                 root * ${documentRoot}
@@ -591,6 +622,15 @@ in {
           webserver.wait_for_unit("nginx.service")
           check_connection(client, "slow.example.test")
 
+      with subtest("Can limit concurrency of running renewals"):
+          switch_to(webserver, "concurrency-limit")
+          webserver.wait_for_unit("acme-finished-f.example.test.target")
+          webserver.wait_for_unit("acme-finished-g.example.test.target")
+          webserver.wait_for_unit("acme-finished-h.example.test.target")
+          check_connection(client, "f.example.test")
+          check_connection(client, "g.example.test")
+          check_connection(client, "h.example.test")
+
       with subtest("Works with caddy"):
           switch_to(webserver, "caddy")
           webserver.wait_for_unit("acme-finished-example.test.target")
diff --git a/nixos/tests/adguardhome.nix b/nixos/tests/adguardhome.nix
index 9f8ddc33f57e..a6f790b83f5f 100644
--- a/nixos/tests/adguardhome.nix
+++ b/nixos/tests/adguardhome.nix
@@ -7,7 +7,6 @@
     emptyConf = { lib, ... }: {
       services.adguardhome = {
         enable = true;
-        settings = {};
       };
     };
 
diff --git a/nixos/tests/akkoma.nix b/nixos/tests/akkoma.nix
index 7115c0beed34..287e2d485999 100644
--- a/nixos/tests/akkoma.nix
+++ b/nixos/tests/akkoma.nix
@@ -33,7 +33,10 @@ let
 
     echo '${userPassword}' | ${pkgs.toot}/bin/toot login_cli -i "akkoma.nixos.test" -e "jamy@nixos.test"
     echo "y" | ${pkgs.toot}/bin/toot post "hello world Jamy here"
-    echo "y" | ${pkgs.toot}/bin/toot timeline | grep -F -q "hello world Jamy here"
+
+    # Retrieving the timeline with toot is currently broken due to an incompatible timestamp format
+    # cf. <https://akkoma.dev/AkkomaGang/akkoma/issues/637> and <https://github.com/ihabunek/toot/issues/399>
+    #echo "y" | ${pkgs.toot}/bin/toot timeline | grep -F -q "hello world Jamy here"
 
     # Test file upload
     echo "y" | ${pkgs.toot}/bin/toot upload <(dd if=/dev/zero bs=1024 count=1024 status=none) \
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index a6201b9d40d0..11cc12f071ce 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -109,6 +109,7 @@ in {
   allTerminfo = handleTest ./all-terminfo.nix {};
   alps = handleTest ./alps.nix {};
   amazon-init-shell = handleTest ./amazon-init-shell.nix {};
+  anbox = runTest ./anbox.nix;
   anuko-time-tracker = handleTest ./anuko-time-tracker.nix {};
   apcupsd = handleTest ./apcupsd.nix {};
   apfs = runTest ./apfs.nix;
@@ -210,6 +211,8 @@ in {
   custom-ca = handleTest ./custom-ca.nix {};
   croc = handleTest ./croc.nix {};
   darling = handleTest ./darling.nix {};
+  dae = handleTest ./dae.nix {};
+  dconf = handleTest ./dconf.nix {};
   deepin = handleTest ./deepin.nix {};
   deluge = handleTest ./deluge.nix {};
   dendrite = handleTest ./matrix/dendrite.nix {};
@@ -272,7 +275,6 @@ in {
   firefox-beta = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-beta; };
   firefox-devedition = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-devedition; };
   firefox-esr    = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr; }; # used in `tested` job
-  firefox-esr-102 = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr-102; };
   firefox-esr-115 = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr-115; };
   firejail = handleTest ./firejail.nix {};
   firewall = handleTest ./firewall.nix { nftables = false; };
@@ -282,12 +284,13 @@ in {
   fluentd = handleTest ./fluentd.nix {};
   fluidd = handleTest ./fluidd.nix {};
   fontconfig-default-fonts = handleTest ./fontconfig-default-fonts.nix {};
-  forgejo = handleTest ./gitea.nix { giteaPackage = pkgs.forgejo; };
+  forgejo = handleTest ./forgejo.nix { };
   freenet = handleTest ./freenet.nix {};
   freeswitch = handleTest ./freeswitch.nix {};
   freshrss-sqlite = handleTest ./freshrss-sqlite.nix {};
   freshrss-pgsql = handleTest ./freshrss-pgsql.nix {};
   frigate = handleTest ./frigate.nix {};
+  frp = handleTest ./frp.nix {};
   frr = handleTest ./frr.nix {};
   fsck = handleTest ./fsck.nix {};
   fsck-systemd-stage-1 = handleTest ./fsck.nix { systemdStage1 = true; };
@@ -345,10 +348,12 @@ in {
   hedgedoc = handleTest ./hedgedoc.nix {};
   herbstluftwm = handleTest ./herbstluftwm.nix {};
   homepage-dashboard = handleTest ./homepage-dashboard.nix {};
+  honk = runTest ./honk.nix;
   installed-tests = pkgs.recurseIntoAttrs (handleTest ./installed-tests {});
   invidious = handleTest ./invidious.nix {};
   oci-containers = handleTestOn ["aarch64-linux" "x86_64-linux"] ./oci-containers.nix {};
   odoo = handleTest ./odoo.nix {};
+  odoo15 = handleTest ./odoo.nix { package = pkgs.odoo15; };
   # 9pnet_virtio used to mount /nix partition doesn't support
   # hibernation. This test happens to work on x86_64-linux but
   # not on other platforms.
@@ -392,7 +397,7 @@ in {
   jibri = handleTest ./jibri.nix {};
   jirafeau = handleTest ./jirafeau.nix {};
   jitsi-meet = handleTest ./jitsi-meet.nix {};
-  jool = handleTest ./jool.nix {};
+  jool = import ./jool.nix { inherit pkgs runTest; };
   k3s = handleTest ./k3s {};
   kafka = handleTest ./kafka.nix {};
   kanidm = handleTest ./kanidm.nix {};
@@ -443,7 +448,7 @@ in {
   loki = handleTest ./loki.nix {};
   luks = handleTest ./luks.nix {};
   lvm2 = handleTest ./lvm2 {};
-  lxd = handleTest ./lxd {};
+  lxd = pkgs.recurseIntoAttrs (handleTest ./lxd { inherit handleTestOn; });
   lxd-image-server = handleTest ./lxd-image-server.nix {};
   #logstash = handleTest ./logstash.nix {};
   lorri = handleTest ./lorri/default.nix {};
@@ -463,6 +468,7 @@ in {
   matrix-appservice-irc = handleTest ./matrix/appservice-irc.nix {};
   matrix-conduit = handleTest ./matrix/conduit.nix {};
   matrix-synapse = handleTest ./matrix/synapse.nix {};
+  matrix-synapse-workers = handleTest ./matrix/synapse-workers.nix {};
   mattermost = handleTest ./mattermost.nix {};
   mediamtx = handleTest ./mediamtx.nix {};
   mediatomb = handleTest ./mediatomb.nix {};
@@ -480,6 +486,7 @@ in {
   miriway = handleTest ./miriway.nix {};
   misc = handleTest ./misc.nix {};
   mjolnir = handleTest ./matrix/mjolnir.nix {};
+  mobilizon = handleTest ./mobilizon.nix {};
   mod_perl = handleTest ./mod_perl.nix {};
   molly-brown = handleTest ./molly-brown.nix {};
   monica = handleTest ./web-apps/monica.nix {};
@@ -666,12 +673,15 @@ in {
   qboot = handleTestOn ["x86_64-linux" "i686-linux"] ./qboot.nix {};
   qemu-vm-restrictnetwork = handleTest ./qemu-vm-restrictnetwork.nix {};
   qemu-vm-volatile-root = runTest ./qemu-vm-volatile-root.nix;
-  quorum = handleTest ./quorum.nix {};
-  quake3 = handleTest ./quake3.nix {};
+  qgis = handleTest ./qgis.nix { qgisPackage = pkgs.qgis; };
+  qgis-ltr = handleTest ./qgis.nix { qgisPackage = pkgs.qgis-ltr; };
   qownnotes = handleTest ./qownnotes.nix {};
+  quake3 = handleTest ./quake3.nix {};
+  quorum = handleTest ./quorum.nix {};
   rabbitmq = handleTest ./rabbitmq.nix {};
   radarr = handleTest ./radarr.nix {};
   radicale = handleTest ./radicale.nix {};
+  ragnarwm = handleTest ./ragnarwm.nix {};
   rasdaemon = handleTest ./rasdaemon.nix {};
   readarr = handleTest ./readarr.nix {};
   redis = handleTest ./redis.nix {};
@@ -724,6 +734,7 @@ in {
   sslh = handleTest ./sslh.nix {};
   sssd = handleTestOn ["x86_64-linux"] ./sssd.nix {};
   sssd-ldap = handleTestOn ["x86_64-linux"] ./sssd-ldap.nix {};
+  stalwart-mail = handleTest ./stalwart-mail.nix {};
   stargazer = runTest ./web-servers/stargazer.nix;
   starship = handleTest ./starship.nix {};
   static-web-server = handleTest ./web-servers/static-web-server.nix {};
@@ -732,6 +743,7 @@ in {
   strongswan-swanctl = handleTest ./strongswan-swanctl.nix {};
   stunnel = handleTest ./stunnel.nix {};
   sudo = handleTest ./sudo.nix {};
+  sudo-rs = handleTest ./sudo-rs.nix {};
   swap-file-btrfs = handleTest ./swap-file-btrfs.nix {};
   swap-partition = handleTest ./swap-partition.nix {};
   swap-random-encryption = handleTest ./swap-random-encryption.nix {};
@@ -833,6 +845,7 @@ in {
   uptime-kuma = handleTest ./uptime-kuma.nix {};
   usbguard = handleTest ./usbguard.nix {};
   user-activation-scripts = handleTest ./user-activation-scripts.nix {};
+  user-expiry = runTest ./user-expiry.nix;
   user-home-mode = handleTest ./user-home-mode.nix {};
   uwsgi = handleTest ./uwsgi.nix {};
   v2ray = handleTest ./v2ray.nix {};
diff --git a/nixos/tests/anbox.nix b/nixos/tests/anbox.nix
new file mode 100644
index 000000000000..dfd6c13d9318
--- /dev/null
+++ b/nixos/tests/anbox.nix
@@ -0,0 +1,36 @@
+{ lib, pkgs, ... }:
+
+{
+  name = "anbox";
+  meta.maintainers = with lib.maintainers; [ mvnetbiz ];
+
+  nodes.machine = { pkgs, config, ... }: {
+    imports = [
+      ./common/user-account.nix
+      ./common/x11.nix
+    ];
+
+    environment.systemPackages = with pkgs; [ android-tools ];
+
+    test-support.displayManager.auto.user = "alice";
+
+    virtualisation.anbox.enable = true;
+    boot.kernelPackages = pkgs.linuxPackages_5_15;
+    virtualisation.memorySize = 2500;
+  };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.users.users.alice;
+    bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus";
+  in ''
+    machine.wait_for_x()
+
+    machine.wait_until_succeeds(
+        "sudo -iu alice ${bus} anbox wait-ready"
+    )
+
+    machine.wait_until_succeeds("adb shell true")
+
+    print(machine.succeed("adb devices"))
+  '';
+}
diff --git a/nixos/tests/custom-ca.nix b/nixos/tests/custom-ca.nix
index 25a7b6fdea46..0fcdf81022d7 100644
--- a/nixos/tests/custom-ca.nix
+++ b/nixos/tests/custom-ca.nix
@@ -131,8 +131,8 @@ let
         # chromium-based browsers refuse to run as root
         test-support.displayManager.auto.user = "alice";
 
-        # browsers may hang with the default memory
-        virtualisation.memorySize = 600;
+        # machine often runs out of memory with less
+        virtualisation.memorySize = 1024;
 
         environment.systemPackages = [ pkgs.xdotool pkgs.${browser} ];
       };
diff --git a/nixos/tests/dae.nix b/nixos/tests/dae.nix
new file mode 100644
index 000000000000..b8c8ebce7457
--- /dev/null
+++ b/nixos/tests/dae.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+
+  name = "dae";
+
+  meta = {
+    maintainers = with lib.maintainers; [ oluceps ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.curl ];
+    services.nginx = {
+      enable = true;
+      statusPage = true;
+    };
+    services.dae = {
+      enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_unit("dae.service")
+
+    machine.wait_for_open_port(80)
+
+    machine.succeed("curl --fail --max-time 10 http://localhost")
+  '';
+
+})
diff --git a/nixos/tests/dconf.nix b/nixos/tests/dconf.nix
new file mode 100644
index 000000000000..86f703e3b98e
--- /dev/null
+++ b/nixos/tests/dconf.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix
+  ({ lib, ... }:
+  {
+    name = "dconf";
+
+    meta.maintainers = with lib.maintainers; [
+      linsui
+    ];
+
+    nodes.machine = { config, pkgs, lib, ... }: {
+      users.extraUsers.alice = { isNormalUser = true; };
+      programs.dconf = with lib.gvariant; {
+        enable = true;
+        profiles.user.databases = [
+          {
+            settings = {
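+              # mkInt32 gives the value an explicit GVariant int32 type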
+              "test/not/locked" = mkInt32 1;
+              "test/is/locked" = "locked";
+            };
+            locks = [
+              "/test/is/locked"
+            ];
+          }
+        ];
+      };
+    };
+
+    testScript = ''
+      machine.succeed("test $(dconf read -d /test/not/locked) == 1")
+      machine.succeed("test $(dconf read -d /test/is/locked) == \"'locked'\"")
+      machine.fail("sudo -u alice dbus-run-session -- dconf write /test/is/locked \"@s 'unlocked'\"")
+      machine.succeed("sudo -u alice dbus-run-session -- dconf write /test/not/locked \"@i 2\"")
+    '';
+  })
diff --git a/nixos/tests/discourse.nix b/nixos/tests/discourse.nix
index c79ba41c2eb9..3e69a314905c 100644
--- a/nixos/tests/discourse.nix
+++ b/nixos/tests/discourse.nix
@@ -166,7 +166,7 @@ import ./make-test-python.nix (
         request = builtins.toJSON {
           title = "Private message";
           raw = "This is a test message.";
-          target_usernames = admin.username;
+          target_recipients = admin.username;
           archetype = "private_message";
         };
       in ''
diff --git a/nixos/tests/docker-tools.nix b/nixos/tests/docker-tools.nix
index 44b583ebcea5..fcdfa586fd55 100644
--- a/nixos/tests/docker-tools.nix
+++ b/nixos/tests/docker-tools.nix
@@ -55,7 +55,7 @@ in {
   nodes = {
     docker = { ... }: {
       virtualisation = {
-        diskSize = 2048;
+        diskSize = 3072;
         docker.enable = true;
       };
     };
diff --git a/nixos/tests/dolibarr.nix b/nixos/tests/dolibarr.nix
index 2f012a0c67da..4fdee9e9698f 100644
--- a/nixos/tests/dolibarr.nix
+++ b/nixos/tests/dolibarr.nix
@@ -50,7 +50,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     # Now, we have installed the machine, let's verify we still have the right configuration.
     assert 'nixos' in machine.succeed("cat /var/lib/dolibarr/conf.php")
     # We do not want any redirect now as we have installed the machine.
-    machine.succeed('curl -f -X POST http://localhost')
+    machine.succeed('curl -f -X GET http://localhost')
     # Test authentication to the webservice.
     parser = TokenParser()
     parser.feed(machine.succeed('curl -f -X GET http://localhost/index.php?mainmenu=login&username=root'))
diff --git a/nixos/tests/fontconfig-default-fonts.nix b/nixos/tests/fontconfig-default-fonts.nix
index d8c6ea0f721b..293dc43f91f3 100644
--- a/nixos/tests/fontconfig-default-fonts.nix
+++ b/nixos/tests/fontconfig-default-fonts.nix
@@ -9,7 +9,7 @@ import ./make-test-python.nix ({ lib, ... }:
   nodes.machine = { config, pkgs, ... }: {
     fonts.enableDefaultPackages = true; # Background fonts
     fonts.packages = with pkgs; [
-      noto-fonts-emoji
+      noto-fonts-color-emoji
       cantarell-fonts
       twitter-color-emoji
       source-code-pro
diff --git a/nixos/tests/forgejo.nix b/nixos/tests/forgejo.nix
new file mode 100644
index 000000000000..b326819e3190
--- /dev/null
+++ b/nixos/tests/forgejo.nix
@@ -0,0 +1,157 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  ## gpg --faked-system-time='20230301T010000!' --quick-generate-key snakeoil ed25519 sign
+  signingPrivateKey = ''
+    -----BEGIN PGP PRIVATE KEY BLOCK-----
+
+    lFgEY/6jkBYJKwYBBAHaRw8BAQdADXiZRV8RJUyC9g0LH04wLMaJL9WTc+szbMi7
+    5fw4yP8AAQCl8EwGfzSLm/P6fCBfA3I9znFb3MEHGCCJhJ6VtKYyRw7ktAhzbmFr
+    ZW9pbIiUBBMWCgA8FiEE+wUM6VW/NLtAdSixTWQt6LZ4x50FAmP+o5ACGwMFCQPC
+    ZwAECwkIBwQVCgkIBRYCAwEAAh4FAheAAAoJEE1kLei2eMedFTgBAKQs1oGFZrCI
+    TZP42hmBTKxGAI1wg7VSdDEWTZxut/2JAQDGgo2sa4VHMfj0aqYGxrIwfP2B7JHO
+    GCqGCRf9O/hzBA==
+    =9Uy3
+    -----END PGP PRIVATE KEY BLOCK-----
+  '';
+  signingPrivateKeyId = "4D642DE8B678C79D";
+
+  supportedDbTypes = [ "mysql" "postgres" "sqlite3" ];
+  makeGForgejoTest = type: nameValuePair type (makeTest {
+    name = "forgejo-${type}";
+    meta.maintainers = with maintainers; [ bendlas emilylange ];
+
+    nodes = {
+      server = { config, pkgs, ... }: {
+        virtualisation.memorySize = 2047;
+        services.forgejo = {
+          enable = true;
+          database = { inherit type; };
+          settings.service.DISABLE_REGISTRATION = true;
+          settings."repository.signing".SIGNING_KEY = signingPrivateKeyId;
+          settings.actions.ENABLED = true;
+        };
+        environment.systemPackages = [ config.services.forgejo.package pkgs.gnupg pkgs.jq ];
+        services.openssh.enable = true;
+
+        specialisation.runner = {
+          inheritParentConfig = true;
+          configuration.services.gitea-actions-runner.instances."test" = {
+            enable = true;
+            name = "ci";
+            url = "http://localhost:3000";
+            labels = [
+              # don't require docker/podman
+              "native:host"
+            ];
+            tokenFile = "/var/lib/forgejo/runner_token";
+          };
+        };
+      };
+      client1 = { config, pkgs, ... }: {
+        environment.systemPackages = [ pkgs.git ];
+      };
+      client2 = { config, pkgs, ... }: {
+        environment.systemPackages = [ pkgs.git ];
+      };
+    };
+
+    testScript = { nodes, ... }:
+      let
+        inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+        serverSystem = nodes.server.system.build.toplevel;
+      in
+      ''
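+        # Python helper constants reused by the shell commands below; the
+        # snakeOilPrivateKey interpolation resolves to a store path at eval time.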
+        GIT_SSH_COMMAND = "ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no"
+        REPO = "forgejo@server:test/repo"
+        PRIVK = "${snakeOilPrivateKey}"
+
+        start_all()
+
+        client1.succeed("mkdir /tmp/repo")
+        client1.succeed("mkdir -p $HOME/.ssh")
+        client1.succeed(f"cat {PRIVK} > $HOME/.ssh/privk")
+        client1.succeed("chmod 0400 $HOME/.ssh/privk")
+        client1.succeed("git -C /tmp/repo init")
+        client1.succeed("echo hello world > /tmp/repo/testfile")
+        client1.succeed("git -C /tmp/repo add .")
+        client1.succeed("git config --global user.email test@localhost")
+        client1.succeed("git config --global user.name test")
+        client1.succeed("git -C /tmp/repo commit -m 'Initial import'")
+        client1.succeed(f"git -C /tmp/repo remote add origin {REPO}")
+
+        server.wait_for_unit("forgejo.service")
+        server.wait_for_open_port(3000)
+        server.wait_for_open_port(22)
+        server.succeed("curl --fail http://localhost:3000/")
+
+        server.succeed(
+            "su -l forgejo -c 'gpg --homedir /var/lib/forgejo/data/home/.gnupg "
+            + "--import ${toString (pkgs.writeText "forgejo.key" signingPrivateKey)}'"
+        )
+
+        assert "BEGIN PGP PUBLIC KEY BLOCK" in server.succeed("curl http://localhost:3000/api/v1/signing-key.gpg")
+
+        server.succeed(
+            "curl --fail http://localhost:3000/user/sign_up | grep 'Registration is disabled. "
+            + "Please contact your site administrator.'"
+        )
+        server.succeed(
+            "su -l forgejo -c 'GITEA_WORK_DIR=/var/lib/forgejo gitea admin user create "
+            + "--username test --password totallysafe --email test@localhost'"
+        )
+
+        api_token = server.succeed(
+            "curl --fail -X POST http://test:totallysafe@localhost:3000/api/v1/users/test/tokens "
+            + "-H 'Accept: application/json' -H 'Content-Type: application/json' -d "
+            + "'{\"name\":\"token\",\"scopes\":[\"all\"]}' | jq '.sha1' | xargs echo -n"
+        )
+
+        server.succeed(
+            "curl --fail -X POST http://localhost:3000/api/v1/user/repos "
+            + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+            + f"-H 'Authorization: token {api_token}'"
+            + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
+        )
+
+        server.succeed(
+            "curl --fail -X POST http://localhost:3000/api/v1/user/keys "
+            + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+            + f"-H 'Authorization: token {api_token}'"
+            + ' -d \'{"key":"${snakeOilPublicKey}","read_only":true,"title":"SSH"}\'''
+        )
+
+        client1.succeed(
+            f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' git -C /tmp/repo push origin master"
+        )
+
+        client2.succeed("mkdir -p $HOME/.ssh")
+        client2.succeed(f"cat {PRIVK} > $HOME/.ssh/privk")
+        client2.succeed("chmod 0400 $HOME/.ssh/privk")
+        client2.succeed(f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' git clone {REPO}")
+        client2.succeed('test "$(cat repo/testfile | xargs echo -n)" = "hello world"')
+
+        server.wait_until_succeeds(
+            'test "$(curl http://localhost:3000/api/v1/repos/test/repo/commits '
+            + '-H "Accept: application/json" | jq length)" = "1"',
+            timeout=10
+        )
+
+        with subtest("Testing runner registration"):
+            server.succeed(
+                "su -l forgejo -c 'GITEA_WORK_DIR=/var/lib/forgejo gitea actions generate-runner-token' | sed 's/^/TOKEN=/' | tee /var/lib/forgejo/runner_token"
+            )
+            server.succeed("${serverSystem}/specialisation/runner/bin/switch-to-configuration test")
+            server.wait_for_unit("gitea-runner-test.service")
+            server.succeed("journalctl -o cat -u gitea-runner-test.service | grep -q 'Runner registered successfully'")
+      '';
+  });
+in
+
+listToAttrs (map makeGForgejoTest supportedDbTypes)
diff --git a/nixos/tests/frp.nix b/nixos/tests/frp.nix
new file mode 100644
index 000000000000..2f5c0f8ec933
--- /dev/null
+++ b/nixos/tests/frp.nix
@@ -0,0 +1,86 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "frp";
+  meta.maintainers = with lib.maintainers; [ zaldnoay janik ];
+  nodes = {
+    frps = {
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+      };
+
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "10.0.0.1/24";
+      };
+
+      services.frp = {
+        enable = true;
+        role = "server";
+        settings = {
+          common = {
+            bind_port = 7000;
+            vhost_http_port = 80;
+          };
+        };
+      };
+    };
+
+
+    frpc = {
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+      };
+
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "10.0.0.2/24";
+      };
+
+      services.httpd = {
+        enable = true;
+        adminAddr = "admin@example.com";
+        virtualHosts."test-appication" =
+        let
+          testdir = pkgs.writeTextDir "web/index.php" "<?php phpinfo();";
+        in
+        {
+          documentRoot = "${testdir}/web";
+          locations."/" = {
+            index = "index.php index.html";
+          };
+        };
+        phpPackage = pkgs.php81;
+        enablePHP = true;
+      };
+
+      services.frp = {
+        enable = true;
+        role = "client";
+        settings = {
+          common = {
+            server_addr = "10.0.0.1";
+            server_port = 7000;
+          };
+          web = {
+            type = "http";
+            local_port = 80;
+            custom_domains = "10.0.0.1";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    frps.wait_for_unit("frp.service")
+    frps.wait_for_open_port(80)
+    frpc.wait_for_unit("frp.service")
+    response = frpc.succeed("curl -fvvv -s http://127.0.0.1/")
+    assert "PHP Version ${pkgs.php81.version}" in response, "PHP version not detected"
+    response = frpc.succeed("curl -fvvv -s http://10.0.0.1/")
+    assert "PHP Version ${pkgs.php81.version}" in response, "PHP version not detected"
+  '';
+})
diff --git a/nixos/tests/honk.nix b/nixos/tests/honk.nix
new file mode 100644
index 000000000000..71d86a592439
--- /dev/null
+++ b/nixos/tests/honk.nix
@@ -0,0 +1,32 @@
+{ lib, ... }:
+
+{
+  name = "honk-server";
+
+  nodes = {
+    machine = { pkgs, ... }: {
+      services.honk = {
+        enable = true;
+        host = "0.0.0.0";
+        port = 8080;
+        username = "username";
+        passwordFile = "${pkgs.writeText "honk-password" "secure"}";
+        servername = "servername";
+      };
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("honk.service")
+    machine.wait_for_open_port(8080)
+
+    machine.stop_job("honk")
+    machine.wait_for_closed_port(8080)
+
+    machine.start_job("honk")
+    machine.wait_for_open_port(8080)
+  '';
+
+  meta.maintainers = [ lib.maintainers.drupol ];
+}
diff --git a/nixos/tests/jool.nix b/nixos/tests/jool.nix
index 6d5ded9b18e0..93575f07b1c8 100644
--- a/nixos/tests/jool.nix
+++ b/nixos/tests/jool.nix
@@ -1,9 +1,4 @@
-{ system ? builtins.currentSystem,
-  config ? {},
-  pkgs ? import ../.. { inherit system config; }
-}:
-
-with import ../lib/testing-python.nix { inherit system pkgs; };
+{ pkgs, runTest }:
 
 let
   inherit (pkgs) lib;
@@ -23,7 +18,6 @@ let
       description = "Mock webserver";
       wants = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
-      serviceConfig.Restart = "always";
       script = ''
         while true; do
         {
@@ -40,7 +34,7 @@ let
 in
 
 {
-  siit = makeTest {
+  siit = runTest {
     # This test simulates the setup described in [1] with two IPv6 and
     # IPv4-only devices on different subnets communicating through a border
     # relay running Jool in SIIT mode.
@@ -49,8 +43,7 @@ in
     meta.maintainers = with lib.maintainers; [ rnhmjoj ];
 
     # Border relay
-    nodes.relay = { ... }: {
-      imports = [ ../modules/profiles/minimal.nix ];
+    nodes.relay = {
       virtualisation.vlans = [ 1 2 ];
 
       # Enable packet routing
@@ -65,20 +58,13 @@ in
         eth2.ipv4.addresses = [ { address = "192.0.2.1";  prefixLength = 24; } ];
       };
 
-      networking.jool = {
-        enable = true;
-        siit.enable = true;
-        siit.config.global.pool6 = "fd::/96";
-      };
+      networking.jool.enable = true;
+      networking.jool.siit.default.global.pool6 = "fd::/96";
     };
 
     # IPv6 only node
-    nodes.alice = { ... }: {
-      imports = [
-        ../modules/profiles/minimal.nix
-        ipv6Only
-        (webserver 6 "Hello, Bob!")
-      ];
+    nodes.alice = {
+      imports = [ ipv6Only (webserver 6 "Hello, Bob!") ];
 
       virtualisation.vlans = [ 1 ];
       networking.interfaces.eth1.ipv6 = {
@@ -89,12 +75,8 @@ in
     };
 
     # IPv4 only node
-    nodes.bob = { ... }: {
-      imports = [
-        ../modules/profiles/minimal.nix
-        ipv4Only
-        (webserver 4 "Hello, Alice!")
-      ];
+    nodes.bob = {
+      imports = [ ipv4Only (webserver 4 "Hello, Alice!") ];
 
       virtualisation.vlans = [ 2 ];
       networking.interfaces.eth1.ipv4 = {
@@ -107,17 +89,17 @@ in
     testScript = ''
       start_all()
 
-      relay.wait_for_unit("jool-siit.service")
+      relay.wait_for_unit("jool-siit-default.service")
       alice.wait_for_unit("network-addresses-eth1.service")
       bob.wait_for_unit("network-addresses-eth1.service")
 
       with subtest("Alice and Bob can't ping each other"):
-        relay.systemctl("stop jool-siit.service")
+        relay.systemctl("stop jool-siit-default.service")
         alice.fail("ping -c1 fd::192.0.2.16")
         bob.fail("ping -c1 198.51.100.8")
 
       with subtest("Alice and Bob can ping using the relay"):
-        relay.systemctl("start jool-siit.service")
+        relay.systemctl("start jool-siit-default.service")
         alice.wait_until_succeeds("ping -c1 fd::192.0.2.16")
         bob.wait_until_succeeds("ping -c1 198.51.100.8")
 
@@ -132,7 +114,7 @@ in
     '';
   };
 
-  nat64 = makeTest {
+  nat64 = runTest {
     # This test simulates the setup described in [1] with two IPv6-only nodes
     # (a client and a homeserver) on the LAN subnet and an IPv4 node on the WAN.
     # The router runs Jool in stateful NAT64 mode, masquerading the LAN and
@@ -142,8 +124,7 @@ in
     meta.maintainers = with lib.maintainers; [ rnhmjoj ];
 
     # Router
-    nodes.router = { ... }: {
-      imports = [ ../modules/profiles/minimal.nix ];
+    nodes.router = {
       virtualisation.vlans = [ 1 2 ];
 
       # Enable packet routing
@@ -158,32 +139,29 @@ in
         eth2.ipv4.addresses = [ { address = "203.0.113.1"; prefixLength = 24; } ];
       };
 
-      networking.jool = {
-        enable = true;
-        nat64.enable = true;
-        nat64.config = {
-          bib = [
-            { # forward HTTP 203.0.113.1 (router) → 2001:db8::9 (homeserver)
-              "protocol"     = "TCP";
-              "ipv4 address" = "203.0.113.1#80";
-              "ipv6 address" = "2001:db8::9#80";
-            }
-          ];
-          pool4 = [
-            # Ports for dynamic translation
-            { protocol =  "TCP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
-            { protocol =  "UDP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
-            { protocol = "ICMP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
-            # Ports for static BIB entries
-            { protocol =  "TCP";  prefix = "203.0.113.1/32"; "port range" = "80"; }
-          ];
-        };
+      networking.jool.enable = true;
+      networking.jool.nat64.default = {
+        bib = [
+          { # forward HTTP 203.0.113.1 (router) → 2001:db8::9 (homeserver)
+            "protocol"     = "TCP";
+            "ipv4 address" = "203.0.113.1#80";
+            "ipv6 address" = "2001:db8::9#80";
+          }
+        ];
+        pool4 = [
+          # Ports for dynamic translation
+          { protocol =  "TCP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
+          { protocol =  "UDP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
+          { protocol = "ICMP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
+          # Ports for static BIB entries
+          { protocol =  "TCP";  prefix = "203.0.113.1/32"; "port range" = "80"; }
+        ];
       };
     };
 
     # LAN client (IPv6 only)
-    nodes.client = { ... }: {
-      imports = [ ../modules/profiles/minimal.nix ipv6Only ];
+    nodes.client = {
+      imports = [ ipv6Only ];
       virtualisation.vlans = [ 1 ];
 
       networking.interfaces.eth1.ipv6 = {
@@ -194,12 +172,8 @@ in
     };
 
     # LAN server (IPv6 only)
-    nodes.homeserver = { ... }: {
-      imports = [
-        ../modules/profiles/minimal.nix
-        ipv6Only
-        (webserver 6 "Hello from IPv6!")
-      ];
+    nodes.homeserver = {
+      imports = [ ipv6Only (webserver 6 "Hello from IPv6!") ];
 
       virtualisation.vlans = [ 1 ];
       networking.interfaces.eth1.ipv6 = {
@@ -210,12 +184,8 @@ in
     };
 
     # WAN server (IPv4 only)
-    nodes.server = { ... }: {
-      imports = [
-        ../modules/profiles/minimal.nix
-        ipv4Only
-        (webserver 4 "Hello from IPv4!")
-      ];
+    nodes.server = {
+      imports = [ ipv4Only (webserver 4 "Hello from IPv4!") ];
 
       virtualisation.vlans = [ 2 ];
       networking.interfaces.eth1.ipv4.addresses =
@@ -229,7 +199,7 @@ in
         node.wait_for_unit("network-addresses-eth1.service")
 
       with subtest("Client can ping the WAN server"):
-        router.wait_for_unit("jool-nat64.service")
+        router.wait_for_unit("jool-nat64-default.service")
         client.succeed("ping -c1 64:ff9b::203.0.113.16")
 
       with subtest("Client can connect to the WAN webserver"):
diff --git a/nixos/tests/kea.nix b/nixos/tests/kea.nix
index b4095893b482..c8ecf771fa13 100644
--- a/nixos/tests/kea.nix
+++ b/nixos/tests/kea.nix
@@ -134,31 +134,32 @@ import ./make-test-python.nix ({ pkgs, lib, ...}: {
         extraArgs = [
           "-v"
         ];
-        extraConfig = ''
-          server:
-              listen: 0.0.0.0@53
-
-          log:
-            - target: syslog
-              any: debug
-
-          acl:
-            - id: dhcp_ddns
-              address: 10.0.0.1
-              action: update
-
-          template:
-            - id: default
-              storage: ${zonesDir}
-              zonefile-sync: -1
-              zonefile-load: difference-no-serial
-              journal-content: all
-
-          zone:
-            - domain: lan.nixos.test
-              file: lan.nixos.test.zone
-              acl: [dhcp_ddns]
-        '';
+        settings = {
+          server.listen = [
+            "0.0.0.0@53"
+          ];
+
+          log.syslog.any = "info";
+
+          acl.dhcp_ddns = {
+            address = "10.0.0.1";
+            action = "update";
+          };
+
+          template.default = {
+            storage = zonesDir;
+            zonefile-sync = "-1";
+            zonefile-load = "difference-no-serial";
+            journal-content = "all";
+          };
+
+          zone."lan.nixos.test" = {
+            file = "lan.nixos.test.zone";
+            acl = [
+              "dhcp_ddns"
+            ];
+          };
+        };
       };
 
     };
diff --git a/nixos/tests/kernel-generic.nix b/nixos/tests/kernel-generic.nix
index e69dd550289c..148f66c464d6 100644
--- a/nixos/tests/kernel-generic.nix
+++ b/nixos/tests/kernel-generic.nix
@@ -32,6 +32,7 @@ let
       linux_5_15_hardened
       linux_6_1_hardened
       linux_6_4_hardened
+      linux_6_5_hardened
       linux_rt_5_4
       linux_rt_5_10
       linux_rt_5_15
diff --git a/nixos/tests/knot.nix b/nixos/tests/knot.nix
index 2ecbf69194bb..44efd93b6fa9 100644
--- a/nixos/tests/knot.nix
+++ b/nixos/tests/knot.nix
@@ -60,44 +60,43 @@ in {
       services.knot.enable = true;
       services.knot.extraArgs = [ "-v" ];
       services.knot.keyFiles = [ tsigFile ];
-      services.knot.extraConfig = ''
-        server:
-            listen: 0.0.0.0@53
-            listen: ::@53
-            automatic-acl: true
-
-        remote:
-          - id: secondary
-            address: 192.168.0.2@53
-            key: xfr_key
-
-        template:
-          - id: default
-            storage: ${knotZonesEnv}
-            notify: [secondary]
-            dnssec-signing: on
-            # Input-only zone files
-            # https://www.knot-dns.cz/docs/2.8/html/operation.html#example-3
-            # prevents modification of the zonefiles, since the zonefiles are immutable
-            zonefile-sync: -1
-            zonefile-load: difference
-            journal-content: changes
-            # move databases below the state directory, because they need to be writable
-            journal-db: /var/lib/knot/journal
-            kasp-db: /var/lib/knot/kasp
-            timer-db: /var/lib/knot/timer
-
-        zone:
-          - domain: example.com
-            file: example.com.zone
-
-          - domain: sub.example.com
-            file: sub.example.com.zone
-
-        log:
-          - target: syslog
-            any: info
-      '';
+      services.knot.settings = {
+        server = {
+          listen = [
+            "0.0.0.0@53"
+            "::@53"
+           ];
+          automatic-acl = true;
+        };
+
+        acl.secondary_acl = {
+          address = "192.168.0.2";
+          key = "xfr_key";
+          action = "transfer";
+        };
+
+        remote.secondary.address = "192.168.0.2@53";
+
+        template.default = {
+          storage = knotZonesEnv;
+          notify = [ "secondary" ];
+          acl = [ "secondary_acl" ];
+          dnssec-signing = true;
+          # Input-only zone files prevent modification of the zonefiles,
+          # since the zonefiles in the Nix store are immutable.
+          # https://www.knot-dns.cz/docs/2.8/html/operation.html#example-3
+          zonefile-sync = -1;
+          zonefile-load = "difference";
+          journal-content = "changes";
+        };
+
+        zone = {
+          "example.com".file = "example.com.zone";
+          "sub.example.com".file = "sub.example.com.zone";
+        };
+
+        log.syslog.any = "info";
+      };
     };
 
     secondary = { lib, ... }: {
@@ -113,41 +112,36 @@ in {
       services.knot.enable = true;
       services.knot.keyFiles = [ tsigFile ];
       services.knot.extraArgs = [ "-v" ];
-      services.knot.extraConfig = ''
-        server:
-            listen: 0.0.0.0@53
-            listen: ::@53
-            automatic-acl: true
-
-        remote:
-          - id: primary
-            address: 192.168.0.1@53
-            key: xfr_key
-
-        template:
-          - id: default
-            master: primary
-            # zonefileless setup
-            # https://www.knot-dns.cz/docs/2.8/html/operation.html#example-2
-            zonefile-sync: -1
-            zonefile-load: none
-            journal-content: all
-            # move databases below the state directory, because they need to be writable
-            journal-db: /var/lib/knot/journal
-            kasp-db: /var/lib/knot/kasp
-            timer-db: /var/lib/knot/timer
-
-        zone:
-          - domain: example.com
-            file: example.com.zone
-
-          - domain: sub.example.com
-            file: sub.example.com.zone
-
-        log:
-          - target: syslog
-            any: info
-      '';
+      services.knot.settings = {
+        server = {
+          listen = [
+            "0.0.0.0@53"
+            "::@53"
+          ];
+          automatic-acl = true;
+        };
+
+        remote.primary = {
+          address = "192.168.0.1@53";
+          key = "xfr_key";
+        };
+
+        template.default = {
+          master = "primary";
+          # zonefileless setup
+          # https://www.knot-dns.cz/docs/2.8/html/operation.html#example-2
+          zonefile-sync = "-1";
+          zonefile-load = "none";
+          journal-content = "all";
+        };
+
+        zone = {
+          "example.com".file = "example.com.zone";
+          "sub.example.com".file = "sub.example.com.zone";
+        };
+
+        log.syslog.any = "info";
+      };
     };
     client = { lib, nodes, ... }: {
       imports = [ common ];
diff --git a/nixos/tests/lxd/container.nix b/nixos/tests/lxd/container.nix
index 9e56f6e41e05..bdaaebfc0028 100644
--- a/nixos/tests/lxd/container.nix
+++ b/nixos/tests/lxd/container.nix
@@ -1,7 +1,7 @@
 import ../make-test-python.nix ({ pkgs, lib, ... } :
 
 let
-  lxd-image = import ../../release.nix {
+  releases = import ../../release.nix {
     configuration = {
       # Building documentation makes the test unnecessarily take a longer time:
       documentation.enable = lib.mkForce false;
@@ -11,14 +11,14 @@ let
     };
   };
 
-  lxd-image-metadata = lxd-image.lxdMeta.${pkgs.stdenv.hostPlatform.system};
-  lxd-image-rootfs = lxd-image.lxdImage.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
 
 in {
-  name = "lxd";
+  name = "lxd-container";
 
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ patryk27 ];
+    maintainers = [ patryk27 adamcstephens ];
   };
 
   nodes.machine = { lib, ... }: {
@@ -49,6 +49,9 @@ in {
     # Wait for lxd to settle
     machine.succeed("lxd waitready")
 
+    # no preseed should mean no service
+    machine.fail("systemctl status lxd-preseed.service")
+
     machine.succeed("lxd init --minimal")
 
     machine.succeed(
diff --git a/nixos/tests/lxd/default.nix b/nixos/tests/lxd/default.nix
index 2e34907d7936..20afdd5e48bb 100644
--- a/nixos/tests/lxd/default.nix
+++ b/nixos/tests/lxd/default.nix
@@ -2,8 +2,11 @@
   system ? builtins.currentSystem,
   config ? {},
   pkgs ? import ../../.. {inherit system config;},
+  handleTestOn,
 }: {
   container = import ./container.nix {inherit system pkgs;};
   nftables = import ./nftables.nix {inherit system pkgs;};
+  preseed = import ./preseed.nix {inherit system pkgs;};
   ui = import ./ui.nix {inherit system pkgs;};
+  virtual-machine = handleTestOn ["x86_64-linux"] ./virtual-machine.nix { inherit system pkgs; };
 }
diff --git a/nixos/tests/lxd/nftables.nix b/nixos/tests/lxd/nftables.nix
index b85caa9eb368..d98bd4952906 100644
--- a/nixos/tests/lxd/nftables.nix
+++ b/nixos/tests/lxd/nftables.nix
@@ -20,8 +20,8 @@ import ../make-test-python.nix ({ pkgs, ...} : {
     networking = {
       firewall.enable = false;
       nftables.enable = true;
-      nftables.ruleset = ''
-        table inet filter {
+      nftables.tables."filter".family = "inet";
+      nftables.tables."filter".content = ''
           chain incoming {
             type filter hook input priority 0;
             policy accept;
@@ -36,7 +36,6 @@ import ../make-test-python.nix ({ pkgs, ...} : {
             type filter hook output priority 0;
             policy accept;
           }
-        }
       '';
     };
   };
diff --git a/nixos/tests/lxd/preseed.nix b/nixos/tests/lxd/preseed.nix
new file mode 100644
index 000000000000..7d89b9f56daa
--- /dev/null
+++ b/nixos/tests/lxd/preseed.nix
@@ -0,0 +1,71 @@
+import ../make-test-python.nix ({ pkgs, lib, ... } :
+
+{
+  name = "lxd-preseed";
+
+  meta = {
+    maintainers = with lib.maintainers; [ adamcstephens ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      diskSize = 4096;
+
+      lxc.lxcfs.enable = true;
+      lxd.enable = true;
+
+      lxd.preseed = {
+        networks = [
+          {
+            name = "nixostestbr0";
+            type = "bridge";
+            config = {
+              "ipv4.address" = "10.0.100.1/24";
+              "ipv4.nat" = "true";
+            };
+          }
+        ];
+        profiles = [
+          {
+            name = "nixostest_default";
+            devices = {
+              eth0 = {
+                name = "eth0";
+                network = "nixostestbr0";
+                type = "nic";
+              };
+              root = {
+                path = "/";
+                pool = "default";
+                size = "35GiB";
+                type = "disk";
+              };
+            };
+          }
+        ];
+        storage_pools = [
+          {
+            name = "nixostest_pool";
+            driver = "dir";
+          }
+        ];
+      };
+    };
+  };
+
+  testScript = ''
+    def wait_for_preseed(_) -> bool:
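+      # lxd-preseed is a oneshot unit without RemainAfterExit, so systemctl
+      # reports it as "inactive" once it has finished applying the preseed.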
+      _, output = machine.systemctl("is-active lxd-preseed.service")
+      return ("inactive" in output)
+
+    machine.wait_for_unit("sockets.target")
+    machine.wait_for_unit("lxd.service")
+    with machine.nested("Waiting for preseed to complete"):
+      retry(wait_for_preseed)
+
+    with subtest("Verify preseed resources created"):
+      machine.succeed("lxc profile show nixostest_default")
+      machine.succeed("lxc network info nixostestbr0")
+      machine.succeed("lxc storage show nixostest_pool")
+  '';
+})
diff --git a/nixos/tests/lxd/virtual-machine.nix b/nixos/tests/lxd/virtual-machine.nix
new file mode 100644
index 000000000000..93705e9350c5
--- /dev/null
+++ b/nixos/tests/lxd/virtual-machine.nix
@@ -0,0 +1,64 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  releases = import ../../release.nix {
+    configuration = {
+      # Building documentation makes the test take unnecessarily long:
+      documentation.enable = lib.mkForce false;
+
+      # Our tests require `grep` & friends:
+      environment.systemPackages = with pkgs; [busybox];
+    };
+  };
+
+  lxd-image-metadata = releases.lxdVirtualMachineImageMeta.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-disk = releases.lxdVirtualMachineImage.${pkgs.stdenv.hostPlatform.system};
+
+  instance-name = "instance1";
+in {
+  name = "lxd-virtual-machine";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [adamcstephens];
+  };
+
+  nodes.machine = {lib, ...}: {
+    virtualisation = {
+      diskSize = 4096;
+
+      cores = 2;
+
+      # Ensure we have enough memory for the nested virtual machine
+      memorySize = 1024;
+
+      lxc.lxcfs.enable = true;
+      lxd.enable = true;
+    };
+  };
+
+  testScript = ''
+    def instance_is_up(_) -> bool:
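+      # `lxc exec` only succeeds once the guest's lxd-agent is up, so use it
+      # as a readiness probe for the nested virtual machine.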
+      status, _ = machine.execute("lxc exec ${instance-name} --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+      return status == 0
+
+    machine.wait_for_unit("sockets.target")
+    machine.wait_for_unit("lxd.service")
+    machine.wait_for_file("/var/lib/lxd/unix.socket")
+
+    # Wait for lxd to settle
+    machine.succeed("lxd waitready")
+
+    machine.succeed("lxd init --minimal")
+
+    with subtest("virtual-machine image can be imported"):
+        machine.succeed("lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-disk}/nixos.qcow2 --alias nixos")
+
+    with subtest("virtual-machine can be launched and become available"):
+        machine.succeed("lxc launch nixos ${instance-name} --vm --config limits.memory=512MB --config security.secureboot=false")
+        with machine.nested("Waiting for instance to start and be usable"):
+          retry(instance_is_up)
+
+    with subtest("lxd-agent is started"):
+        machine.succeed("lxc exec ${instance-name} systemctl is-active lxd-agent")
+  '';
+})
diff --git a/nixos/tests/matrix/synapse-workers.nix b/nixos/tests/matrix/synapse-workers.nix
new file mode 100644
index 000000000000..e90301aeae9e
--- /dev/null
+++ b/nixos/tests/matrix/synapse-workers.nix
@@ -0,0 +1,50 @@
+import ../make-test-python.nix ({ pkgs, ... }: {
+  name = "matrix-synapse-workers";
+  meta = with pkgs.lib; {
+    maintainers = teams.matrix.members;
+  };
+
+  nodes = {
+    homeserver =
+      { pkgs
+      , nodes
+      , ...
+      }: {
+        services.postgresql = {
+          enable = true;
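+          # Synapse requires the database to use C collation and ctype.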
+          initialScript = pkgs.writeText "synapse-init.sql" ''
+            CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse';
+            CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
+            TEMPLATE template0
+            LC_COLLATE = "C"
+            LC_CTYPE = "C";
+          '';
+        };
+
+        services.matrix-synapse = {
+          enable = true;
+          settings = {
+            database = {
+              name = "psycopg2";
+              args.password = "synapse";
+            };
+            enable_registration = true;
+            enable_registration_without_verification = true;
+
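+            # Delegate outbound federation traffic to the dedicated worker defined below.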
+            federation_sender_instances = [ "federation_sender" ];
+          };
+          configureRedisLocally = true;
+          workers = {
+            "federation_sender" = { };
+          };
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+
+    homeserver.wait_for_unit("matrix-synapse.service");
+    homeserver.wait_for_unit("matrix-synapse-worker-federation_sender.service");
+  '';
+})
diff --git a/nixos/tests/mobilizon.nix b/nixos/tests/mobilizon.nix
new file mode 100644
index 000000000000..2b070ca9d960
--- /dev/null
+++ b/nixos/tests/mobilizon.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ lib, ... }:
+  let
+    certs = import ./common/acme/server/snakeoil-certs.nix;
+    mobilizonDomain = certs.domain;
+    port = 41395;
+  in
+
+  {
+    name = "mobilizon";
+    meta.maintainers = with lib.maintainers; [ minijackson erictapen ];
+
+    nodes.server =
+      { ... }:
+      {
+        services.mobilizon = {
+          enable = true;
+          settings = {
+            ":mobilizon" = {
+              ":instance" = {
+                name = "Test Mobilizon";
+                hostname = mobilizonDomain;
+              };
+              "Mobilizon.Web.Endpoint".http.port = port;
+            };
+          };
+        };
+
+        security.pki.certificateFiles = [ certs.ca.cert ];
+
+        services.nginx.virtualHosts."${mobilizonDomain}" = {
+          enableACME = lib.mkForce false;
+          sslCertificate = certs.${mobilizonDomain}.cert;
+          sslCertificateKey = certs.${mobilizonDomain}.key;
+        };
+
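+        # Resolve the test domain locally so curl can reach the nginx virtual host.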
+        networking.hosts."::1" = [ mobilizonDomain ];
+      };
+
+    testScript = ''
+      server.wait_for_unit("mobilizon.service")
+      server.wait_for_open_port(${toString port})
+      server.succeed("curl --fail https://${mobilizonDomain}/")
+    '';
+  })
diff --git a/nixos/tests/noto-fonts.nix b/nixos/tests/noto-fonts.nix
index edbb0db4cb7a..b871f5f51729 100644
--- a/nixos/tests/noto-fonts.nix
+++ b/nixos/tests/noto-fonts.nix
@@ -11,7 +11,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         noto-fonts
         noto-fonts-cjk-sans
         noto-fonts-cjk-serif
-        noto-fonts-emoji
+        noto-fonts-color-emoji
       ];
       fontconfig.defaultFonts = {
         serif = [ "Noto Serif" "Noto Serif CJK SC" ];
diff --git a/nixos/tests/odoo.nix b/nixos/tests/odoo.nix
index 7c2cf31370f9..00ae4a2137d1 100644
--- a/nixos/tests/odoo.nix
+++ b/nixos/tests/odoo.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, lib, ...} : {
+import ./make-test-python.nix ({ pkgs, lib, package ? pkgs.odoo, ...} : {
   name = "odoo";
   meta.maintainers = with lib.maintainers; [ mkg20001 ];
 
@@ -11,6 +11,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
 
       services.odoo = {
         enable = true;
+        package = package;
         domain = "localhost";
       };
     };
diff --git a/nixos/tests/openssh.nix b/nixos/tests/openssh.nix
index 4083f5906d79..d771ffd3e0f7 100644
--- a/nixos/tests/openssh.nix
+++ b/nixos/tests/openssh.nix
@@ -52,6 +52,36 @@ in {
         };
       };
 
+    server_match_rule =
+      { ... }:
+
+      {
+        services.openssh = {
+          enable = true; listenAddresses = [ { addr = "127.0.0.1"; port = 22; } ];
+          extraConfig = ''
+            # Combined test for two (predictable) Match criteria
+            Match LocalAddress 127.0.0.1 LocalPort 22
+              PermitRootLogin yes
+
+            # Separate tests for individual Match criteria
+            Match User root
+              PermitRootLogin yes
+            Match Group root
+              PermitRootLogin yes
+            Match Host nohost.example
+              PermitRootLogin yes
+            Match LocalAddress 127.0.0.1
+              PermitRootLogin yes
+            Match LocalPort 22
+              PermitRootLogin yes
+            Match RDomain nohost.example
+              PermitRootLogin yes
+            Match Address 127.0.0.1
+              PermitRootLogin yes
+          '';
+        };
+      };
+
     client =
       { ... }: { };
 
@@ -114,5 +144,8 @@ in {
     with subtest("localhost-only"):
         server_localhost_only.succeed("ss -nlt | grep '127.0.0.1:22'")
         server_localhost_only_lazy.succeed("ss -nlt | grep '127.0.0.1:22'")
+
+    with subtest("match-rules"):
+        server_match_rule.succeed("ss -nlt | grep '127.0.0.1:22'")
   '';
 })
diff --git a/nixos/tests/plausible.nix b/nixos/tests/plausible.nix
index ab91e08beb34..ef32bb3a805f 100644
--- a/nixos/tests/plausible.nix
+++ b/nixos/tests/plausible.nix
@@ -30,6 +30,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
 
     machine.succeed("curl -f localhost:8000 >&2")
 
+    machine.succeed("curl -f localhost:8000/js/script.js >&2")
+
     csrf_token = machine.succeed(
         "curl -c /tmp/cookies localhost:8000/login | grep '_csrf_token' | sed -E 's,.*value=\"(.*)\".*,\\1,g'"
     )
diff --git a/nixos/tests/postgis.nix b/nixos/tests/postgis.nix
index 9d81ebaad85f..d0685abc510c 100644
--- a/nixos/tests/postgis.nix
+++ b/nixos/tests/postgis.nix
@@ -9,7 +9,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
       { pkgs, ... }:
 
       {
-        services.postgresql = let mypg = pkgs.postgresql_11; in {
+        services.postgresql = let mypg = pkgs.postgresql; in {
             enable = true;
             package = mypg;
             extraPlugins = with mypg.pkgs; [
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index d86f8ac634e8..306c5e071e75 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -716,6 +716,41 @@ let
       '';
     };
 
+    mysqld = {
+      exporterConfig = {
+        enable = true;
+        runAsLocalSuperUser = true;
+        configFile = pkgs.writeText "test-prometheus-exporter-mysqld-config.my-cnf" ''
+          [client]
+          user = exporter
+          password = snakeoilpassword
+        '';
+      };
+      metricProvider = {
+        services.mysql = {
+          enable = true;
+          package = pkgs.mariadb;
+          initialScript = pkgs.writeText "mysql-init-script.sql" ''
+            CREATE USER 'exporter'@'localhost'
+            IDENTIFIED BY 'snakeoilpassword'
+            WITH MAX_USER_CONNECTIONS 3;
+            GRANT PROCESS, REPLICATION CLIENT, SLAVE MONITOR, SELECT ON *.* TO 'exporter'@'localhost';
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-mysqld-exporter.service")
+        wait_for_open_port(9104)
+        wait_for_unit("mysql.service")
+        succeed("curl -sSf http://localhost:9104/metrics | grep 'mysql_up 1'")
+        systemctl("stop mysql.service")
+        succeed("curl -sSf http://localhost:9104/metrics | grep 'mysql_up 0'")
+        systemctl("start mysql.service")
+        wait_for_unit("mysql.service")
+        succeed("curl -sSf http://localhost:9104/metrics | grep 'mysql_up 1'")
+      '';
+    };
+
     nextcloud = {
       exporterConfig = {
         enable = true;
@@ -1387,8 +1422,7 @@ let
     unbound = {
       exporterConfig = {
         enable = true;
-        fetchType = "uds";
-        controlInterface = "/run/unbound/unbound.ctl";
+        unbound.host = "unix:///run/unbound/unbound.ctl";
       };
       metricProvider = {
         services.unbound = {
@@ -1403,7 +1437,7 @@ let
         wait_for_unit("unbound.service")
         wait_for_unit("prometheus-unbound-exporter.service")
         wait_for_open_port(9167)
-        succeed("curl -sSf localhost:9167/metrics | grep 'unbound_up 1'")
+        wait_until_succeeds("curl -sSf localhost:9167/metrics | grep 'unbound_up 1'")
       '';
     };
 
diff --git a/nixos/tests/qgis.nix b/nixos/tests/qgis.nix
new file mode 100644
index 000000000000..7706b8c07747
--- /dev/null
+++ b/nixos/tests/qgis.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, lib, qgisPackage, ... }:
+  let
+    testScript = pkgs.writeTextFile {
+      name = "qgis-test.py";
+      text = (builtins.readFile ../../pkgs/applications/gis/qgis/test.py);
+    };
+  in
+  {
+    name = "qgis";
+    meta = {
+      maintainers = lib.teams.geospatial.members;
+    };
+
+    nodes = {
+      machine = { pkgs, ... }: {
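+        # QGIS and its dependencies need more than the default virtual disk size (value in megabytes).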
+        virtualisation.diskSize = 2 * 1024;
+
+        imports = [ ./common/x11.nix ];
+        environment.systemPackages = [ qgisPackage ];
+
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      machine.succeed("${qgisPackage}/bin/qgis --version | grep 'QGIS ${qgisPackage.version}'")
+      machine.succeed("${qgisPackage}/bin/qgis --code ${testScript}")
+    '';
+  })
diff --git a/nixos/tests/ragnarwm.nix b/nixos/tests/ragnarwm.nix
new file mode 100644
index 000000000000..f7c588b92008
--- /dev/null
+++ b/nixos/tests/ragnarwm.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ lib, ...} : {
+  name = "ragnarwm";
+
+  meta = {
+    maintainers = with lib.maintainers; [ sigmanificient ];
+  };
+
+  nodes.machine = { pkgs, lib, ... }: {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+    test-support.displayManager.auto.user = "alice";
+    services.xserver.displayManager.defaultSession = lib.mkForce "ragnar";
+    services.xserver.windowManager.ragnarwm.enable = true;
+
+    # Set up Ragnar's default terminal
+    environment.systemPackages = [ pkgs.alacritty ];
+  };
+
+  testScript = ''
+    with subtest("ensure x starts"):
+        machine.wait_for_x()
+        machine.wait_for_file("/home/alice/.Xauthority")
+        machine.succeed("xauth merge ~alice/.Xauthority")
+
+    with subtest("ensure we can open a new terminal"):
+        # Sleep a bit before the test, as it may help with sending keys
+        machine.sleep(2)
+        machine.send_key("meta_l-ret")
+        machine.wait_for_window(r"alice.*?machine")
+        machine.sleep(2)
+        machine.screenshot("terminal")
+  '';
+})
diff --git a/nixos/tests/restic.nix b/nixos/tests/restic.nix
index 626049e73341..3b9ea2f85b1e 100644
--- a/nixos/tests/restic.nix
+++ b/nixos/tests/restic.nix
@@ -97,9 +97,9 @@ import ./make-test-python.nix (
       server.start()
       server.wait_for_unit("dbus.socket")
       server.fail(
-          "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots",
-          '${pkgs.restic}/bin/restic -r ${remoteFromFileRepository} -p ${passwordFile} snapshots"',
-          "${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots",
+          "restic-remotebackup snapshots",
+          'restic-remote-from-file-backup snapshots"',
+          "restic-rclonebackup snapshots",
           "grep 'backup.* /opt' /root/fake-restic.log",
       )
       server.succeed(
@@ -112,20 +112,20 @@ import ./make-test-python.nix (
           "timedatectl set-time '2016-12-13 13:45'",
           "systemctl start restic-backups-remotebackup.service",
           "rm /root/backupCleanupCommand",
-          '${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+          'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
           # test that restoring that snapshot produces the same directory
           "mkdir /tmp/restore-1",
-          "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} restore latest -t /tmp/restore-1",
+          "restic-remotebackup restore latest -t /tmp/restore-1",
           "diff -ru ${testDir} /tmp/restore-1/opt",
 
           # test that remote-from-file-backup produces a snapshot
           "systemctl start restic-backups-remote-from-file-backup.service",
-          '${pkgs.restic}/bin/restic -r ${remoteFromFileRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+          'restic-remote-from-file-backup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
           # test that rclonebackup produces a snapshot
           "systemctl start restic-backups-rclonebackup.service",
-          '${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+          'restic-rclonebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
           # test that custompackage runs both `restic backup` and `restic check` with reasonable commandlines
           "systemctl start restic-backups-custompackage.service",
@@ -158,12 +158,12 @@ import ./make-test-python.nix (
           "rm /root/backupCleanupCommand",
           "systemctl start restic-backups-rclonebackup.service",
 
-          '${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"',
-          '${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"',
+          'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"',
+          'restic-rclonebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"',
 
           # test that remoteprune brings us back to 1 snapshot in remotebackup
           "systemctl start restic-backups-remoteprune.service",
-          '${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+          'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
       )
     '';
diff --git a/nixos/tests/shadow.nix b/nixos/tests/shadow.nix
index c9a04088e870..a027af7e450b 100644
--- a/nixos/tests/shadow.nix
+++ b/nixos/tests/shadow.nix
@@ -32,7 +32,7 @@ in import ./make-test-python.nix ({ pkgs, ... }: {
       };
       users.berta = {
         isNormalUser = true;
-        hashedPassword = hashed_bcrypt;
+        hashedPasswordFile = (pkgs.writeText "hashed_bcrypt" hashed_bcrypt).outPath;
         shell = pkgs.bash;
       };
       users.yesim = {
diff --git a/nixos/tests/stalwart-mail.nix b/nixos/tests/stalwart-mail.nix
new file mode 100644
index 000000000000..b5589966a160
--- /dev/null
+++ b/nixos/tests/stalwart-mail.nix
@@ -0,0 +1,117 @@
+# Rudimentary test checking that the Stalwart email server can:
+# - receive some message through SMTP submission, then
+# - serve this message through IMAP.
+
+let
+  certs = import ./common/acme/server/snakeoil-certs.nix;
+  domain = certs.domain;
+
+in import ./make-test-python.nix ({ lib, ... }: {
+  name = "stalwart-mail";
+
+  nodes.main = { pkgs, ... }: {
+    security.pki.certificateFiles = [ certs.ca.cert ];
+
+    services.stalwart-mail = {
+      enable = true;
+      settings = {
+        server.hostname = domain;
+
+        certificate."snakeoil" = {
+          cert = "file://${certs.${domain}.cert}";
+          private-key = "file://${certs.${domain}.key}";
+        };
+
+        server.tls = {
+          certificate = "snakeoil";
+          enable = true;
+          implicit = false;
+        };
+
+        server.listener = {
+          "smtp-submission" = {
+            bind = [ "[::]:587" ];
+            protocol = "smtp";
+          };
+
+          "imap" = {
+            bind = [ "[::]:143" ];
+            protocol = "imap";
+          };
+        };
+
+        session.auth.mechanisms = [ "PLAIN" ];
+        session.auth.directory = "in-memory";
+        jmap.directory = "in-memory";  # shared with imap
+
+        session.rcpt.directory = "in-memory";
+        queue.outbound.next-hop = [ "local" ];
+
+        directory."in-memory" = {
+          type = "memory";
+          users = [
+            {
+              name = "alice";
+              secret = "foobar";
+              email = [ "alice@${domain}" ];
+            }
+            {
+              name = "bob";
+              secret = "foobar";
+              email = [ "bob@${domain}" ];
+            }
+          ];
+        };
+      };
+    };
+
+    environment.systemPackages = [
+      (pkgs.writers.writePython3Bin "test-smtp-submission" { } ''
+        from smtplib import SMTP
+
+        with SMTP('localhost', 587) as smtp:
+            smtp.starttls()
+            smtp.login('alice', 'foobar')
+            smtp.sendmail(
+                'alice@${domain}',
+                'bob@${domain}',
+                """
+                    From: alice@${domain}
+                    To: bob@${domain}
+                    Subject: Some test message
+
+                    This is a test message.
+                """.strip()
+            )
+      '')
+
+      (pkgs.writers.writePython3Bin "test-imap-read" { } ''
+        from imaplib import IMAP4
+
+        with IMAP4('localhost') as imap:
+            imap.starttls()
+            imap.login('bob', 'foobar')
+            imap.select('"All Mail"')
+            status, [ref] = imap.search(None, 'ALL')
+            assert status == 'OK'
+            [msgId] = ref.split()
+            status, msg = imap.fetch(msgId, 'BODY[TEXT]')
+            assert status == 'OK'
+            assert msg[0][1].strip() == b'This is a test message.'
+      '')
+    ];
+  };
+
+  testScript = /* python */ ''
+    main.wait_for_unit("stalwart-mail.service")
+    main.wait_for_open_port(587)
+    main.wait_for_open_port(143)
+
+    main.succeed("test-smtp-submission")
+    main.succeed("test-imap-read")
+  '';
+
+  meta = {
+    maintainers = with lib.maintainers; [ happysalada pacien ];
+  };
+})
diff --git a/nixos/tests/sudo-rs.nix b/nixos/tests/sudo-rs.nix
new file mode 100644
index 000000000000..6006863217b6
--- /dev/null
+++ b/nixos/tests/sudo-rs.nix
@@ -0,0 +1,101 @@
+# Some tests to ensure sudo-rs is working properly.
+{ pkgs, ... }:
+let
+  inherit (pkgs.lib) mkIf optionalString;
+  password = "helloworld";
+in
+  import ./make-test-python.nix ({ lib, pkgs, ...} : {
+    name = "sudo-rs";
+    meta.maintainers = pkgs.sudo-rs.meta.maintainers;
+
+    nodes.machine =
+      { lib, ... }:
+      {
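+        # The su/sudo invocations below need a controlling terminal for password
+        # prompts; faketty provides one in the non-interactive test environment.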
+        environment.systemPackages = [ pkgs.faketty ];
+        users.groups = { foobar = {}; barfoo = {}; baz = { gid = 1337; }; };
+        users.users = {
+          test0 = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+          test1 = { isNormalUser = true; password = password; };
+          test2 = { isNormalUser = true; extraGroups = [ "foobar" ]; password = password; };
+          test3 = { isNormalUser = true; extraGroups = [ "barfoo" ]; };
+          test4 = { isNormalUser = true; extraGroups = [ "baz" ]; };
+          test5 = { isNormalUser = true; };
+        };
+
+        security.sudo.enable = false;
+
+        security.sudo-rs = {
+          enable = true;
+          package = pkgs.sudo-rs;
+          wheelNeedsPassword = false;
+
+          extraRules = [
+            # SUDOERS SYNTAX CHECK (test whether the module produces valid output;
+            # errors are detected by the visudo checks).
+
+            # These should not create any entries
+            { users = [ "notest1" ]; commands = [ ]; }
+            { commands = [ { command = "ALL"; options = [ ]; } ]; }
+
+            # Test defining commands with the options syntax, though not setting any options
+            { users = [ "notest2" ]; commands = [ { command = "ALL"; options = [ ]; } ]; }
+
+
+            # CONFIGURATION FOR TEST CASES
+            { users = [ "test1" ]; groups = [ "foobar" ]; commands = [ "ALL" ]; }
+            { groups = [ "barfoo" 1337 ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; }
+            { users = [ "test5" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; runAs = "test1:barfoo"; }
+          ];
+        };
+      };
+
+    nodes.strict = { ... }: {
+      environment.systemPackages = [ pkgs.faketty ];
+      users.users = {
+        admin = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+        noadmin = { isNormalUser = true; };
+      };
+
+      security.sudo.enable = false;
+
+      security.sudo-rs = {
+        package = pkgs.sudo-rs;
+        enable = true;
+        wheelNeedsPassword = false;
+        execWheelOnly = true;
+      };
+    };
+
+    testScript =
+      ''
+        with subtest("users in wheel group should have passwordless sudo"):
+            machine.succeed('faketty -- su - test0 -c "sudo -u root true"')
+
+        with subtest("test1 user should have sudo with password"):
+            machine.succeed('faketty -- su - test1 -c "echo ${password} | sudo -S -u root true"')
+
+        with subtest("test1 user should not be able to use sudo without password"):
+            machine.fail('faketty -- su - test1 -c "sudo -n -u root true"')
+
+        with subtest("users in group 'foobar' should be able to use sudo with password"):
+            machine.succeed('faketty -- su - test2 -c "echo ${password} | sudo -S -u root true"')
+
+        with subtest("users in group 'barfoo' should be able to use sudo without password"):
+            machine.succeed("sudo -u test3 sudo -n -u root true")
+
+        with subtest("users in group 'baz' (GID 1337)"):
+            machine.succeed("sudo -u test4 sudo -n -u root echo true")
+
+        with subtest("test5 user should be able to run commands under test1"):
+            machine.succeed("sudo -u test5 sudo -n -u test1 true")
+
+        with subtest("test5 user should not be able to run commands under root"):
+            machine.fail("sudo -u test5 sudo -n -u root true 2>/dev/null")
+
+        with subtest("users in wheel should be able to run sudo despite execWheelOnly"):
+            strict.succeed('faketty -- su - admin -c "sudo -u root true"')
+
+        with subtest("non-wheel users should be unable to run sudo thanks to execWheelOnly"):
+            strict.fail('faketty -- su - noadmin -c "sudo --help"')
+      '';
+  })
diff --git a/nixos/tests/sudo.nix b/nixos/tests/sudo.nix
index 3b6028c6f0b0..1b177391488d 100644
--- a/nixos/tests/sudo.nix
+++ b/nixos/tests/sudo.nix
@@ -5,7 +5,7 @@ let
 in
   import ./make-test-python.nix ({ lib, pkgs, ...} : {
     name = "sudo";
-    meta.maintainers = with lib.maintainers; [ lschuermann ];
+    meta.maintainers = pkgs.sudo.meta.maintainers;
 
     nodes.machine =
       { lib, ... }:
diff --git a/nixos/tests/switch-test.nix b/nixos/tests/switch-test.nix
index 53595ae7d3e2..5ffdf180d5e3 100644
--- a/nixos/tests/switch-test.nix
+++ b/nixos/tests/switch-test.nix
@@ -58,6 +58,37 @@ in {
       '');
 
       specialisation = rec {
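+        # Write a bogus init interface version so switch-to-configuration refuses
+        # to activate this configuration (see the "init interface version" subtest).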
+        brokenInitInterface.configuration.config.system.extraSystemBuilderCmds = ''
+          echo "systemd 0" > $out/init-interface-version
+        '';
+
+        modifiedSystemConf.configuration.systemd.extraConfig = ''
+          # Hello world!
+        '';
+
+        addedMount.configuration.virtualisation.fileSystems."/test" = {
+          device = "tmpfs";
+          fsType = "tmpfs";
+        };
+
+        addedMountOptsModified.configuration = {
+          imports = [ addedMount.configuration ];
+          virtualisation.fileSystems."/test".options = [ "x-test" ];
+        };
+
+        addedMountDevModified.configuration = {
+          imports = [ addedMountOptsModified.configuration ];
+          virtualisation.fileSystems."/test".device = lib.mkForce "ramfs";
+        };
+
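+        # Changing the root (store) mount must not restart -.mount during switch;
+        # the test expects it to be reported under "NOT restarting".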
+        storeMountModified.configuration = {
+          virtualisation.fileSystems."/".device = lib.mkForce "auto";
+        };
+
+        swap.configuration.swapDevices = lib.mkVMOverride [
+          { device = "/swapfile"; size = 1; }
+        ];
+
         simpleService.configuration = {
           systemd.services.test = {
             wantedBy = [ "multi-user.target" ];
@@ -450,7 +481,7 @@ in {
           ];
         };
 
-        mountModified.configuration = {
+        mountOptionsModified.configuration = {
           systemd.mounts = [
             {
               description = "Testmount";
@@ -463,6 +494,19 @@ in {
           ];
         };
 
+        mountModified.configuration = {
+          systemd.mounts = [
+            {
+              description = "Testmount";
+              what = "ramfs";
+              type = "ramfs";
+              where = "/testmount";
+              options = "size=10M";
+              wantedBy = [ "local-fs.target" ];
+            }
+          ];
+        };
+
         timer.configuration = {
           systemd.timers.test-timer = {
             wantedBy = [ "timers.target" ];
@@ -630,6 +674,97 @@ in {
 
         # test and dry-activate actions are tested further down below
 
+        # invalid action fails the script
+        switch_to_specialisation("${machine}", "", action="broken-action", fail=True)
+        # no action fails the script
+        assert "Usage:" in machine.fail("${machine}/bin/switch-to-configuration 2>&1")
+
+    with subtest("init interface version"):
+        # Do not try to switch to an invalid init interface version
+        assert "incompatible" in switch_to_specialisation("${machine}", "brokenInitInterface", fail=True)
+
+    with subtest("systemd restarts"):
+        # systemd is restarted when its system.conf changes
+        out = switch_to_specialisation("${machine}", "modifiedSystemConf")
+        assert_contains(out, "restarting systemd...")
+
+    with subtest("continuing from an aborted switch"):
+        # An aborted switch will write into a file what it tried to start
+        # and a second switch should continue from this
+        machine.succeed("echo dbus.service > /run/nixos/start-list")
+        out = switch_to_specialisation("${machine}", "modifiedSystemConf")
+        assert_contains(out, "starting the following units: dbus.service\n")
+
+    with subtest("fstab mounts"):
+        switch_to_specialisation("${machine}", "")
+        # add a mountpoint
+        out = switch_to_specialisation("${machine}", "addedMount")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: test.mount\n")
+        # modify the mountpoint's options
+        out = switch_to_specialisation("${machine}", "addedMountOptsModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: test.mount\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # modify the device
+        out = switch_to_specialisation("${machine}", "addedMountDevModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.mount\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # modify both
+        out = switch_to_specialisation("${machine}", "addedMount")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.mount\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # remove the mount
+        out = switch_to_specialisation("${machine}", "")
+        assert_contains(out, "stopping the following units: test.mount\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: dbus.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # change something about the / mount
+        out = switch_to_specialisation("${machine}", "storeMountModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_contains(out, "NOT restarting the following changed units: -.mount")
+        assert_contains(out, "reloading the following units: dbus.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+    with subtest("swaps"):
+        switch_to_specialisation("${machine}", "")
+        # add a swap
+        out = switch_to_specialisation("${machine}", "swap")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: dbus.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: swapfile.swap")
+        # remove it
+        out = switch_to_specialisation("${machine}", "")
+        assert_contains(out, "stopping swap device: /swapfile")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: dbus.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
     with subtest("services"):
         switch_to_specialisation("${machine}", "")
         # Nothing happens when nothing is changed
@@ -1137,7 +1272,8 @@ in {
         switch_to_specialisation("${machine}", "mount")
         out = machine.succeed("mount | grep 'on /testmount'")
         assert_contains(out, "size=1024k")
-        out = switch_to_specialisation("${machine}", "mountModified")
+        # Changing options reloads the unit
+        out = switch_to_specialisation("${machine}", "mountOptionsModified")
         assert_lacks(out, "stopping the following units:")
         assert_lacks(out, "NOT restarting the following changed units:")
         assert_contains(out, "reloading the following units: testmount.mount\n")
@@ -1147,6 +1283,17 @@ in {
         # It changed
         out = machine.succeed("mount | grep 'on /testmount'")
         assert_contains(out, "size=10240k")
+        # Changing anything but `Options=` restarts the unit
+        out = switch_to_specialisation("${machine}", "mountModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: testmount.mount\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # It changed
+        out = machine.succeed("mount | grep 'on /testmount'")
+        assert_contains(out, "ramfs")
 
     with subtest("timers"):
         switch_to_specialisation("${machine}", "timer")
diff --git a/nixos/tests/systemd-initrd-swraid.nix b/nixos/tests/systemd-initrd-swraid.nix
index d87170c92574..d00e67b5705a 100644
--- a/nixos/tests/systemd-initrd-swraid.nix
+++ b/nixos/tests/systemd-initrd-swraid.nix
@@ -20,6 +20,9 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
         ARRAY /dev/md0 devices=/dev/vdb,/dev/vdc
       '';
     };
+    environment.etc."mdadm.conf".text = ''
+      MAILADDR test@example.com
+    '';
     boot.initrd = {
       systemd = {
         enable = true;
@@ -29,11 +32,14 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
     };
 
     specialisation.boot-swraid.configuration.virtualisation.rootDevice = "/dev/disk/by-label/testraid";
+    # This protects against a regression. We do not have to switch to it.
+    # It's sufficient to trigger its evaluation.
+    specialisation.build-old-initrd.configuration.boot.initrd.systemd.enable = lib.mkForce false;
   };
 
   testScript = ''
     # Create RAID
-    machine.succeed("mdadm --create --force /dev/md0 -n 2 --level=raid0 /dev/vdb /dev/vdc")
+    machine.succeed("mdadm --create --force /dev/md0 -n 2 --level=raid1 /dev/vdb /dev/vdc --metadata=0.90")
     machine.succeed("mkfs.ext4 -L testraid /dev/md0")
     machine.succeed("mkdir -p /mnt && mount /dev/md0 /mnt && echo hello > /mnt/test && umount /mnt")
 
@@ -48,5 +54,13 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
     assert "/dev/md0 on / type ext4" in machine.succeed("mount")
     assert "hello" in machine.succeed("cat /test")
     assert "md0" in machine.succeed("cat /proc/mdstat")
+
+    expected_config = """MAILADDR test@example.com
+
+    ARRAY /dev/md0 devices=/dev/vdb,/dev/vdc
+    """
+    got_config = machine.execute("cat /etc/mdadm.conf")[1]
+    assert expected_config == got_config, repr((expected_config, got_config))
+    machine.wait_for_unit("mdmonitor.service")
   '';
 })
diff --git a/nixos/tests/user-activation-scripts.nix b/nixos/tests/user-activation-scripts.nix
index 5df072ce0508..ebd96b019e92 100644
--- a/nixos/tests/user-activation-scripts.nix
+++ b/nixos/tests/user-activation-scripts.nix
@@ -8,6 +8,7 @@ import ./make-test-python.nix ({ lib, ... }: {
       initialPassword = "pass1";
       isNormalUser = true;
     };
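+    # Per-user tmpfiles rule: type "r" removes %h/file-to-remove when the user's
+    # tmpfiles rules are applied.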
+    systemd.user.tmpfiles.users.alice.rules = [ "r %h/file-to-remove" ];
   };
 
   testScript = ''
@@ -27,7 +28,9 @@ import ./make-test-python.nix ({ lib, ... }: {
     machine.wait_for_file("/home/alice/login-ok")
     verify_user_activation_run_count(1)
 
+    machine.succeed("touch /home/alice/file-to-remove")
     machine.succeed("/run/current-system/bin/switch-to-configuration test")
     verify_user_activation_run_count(2)
+    machine.succeed("[[ ! -f /home/alice/file-to-remove ]] || false")
   '';
 })
diff --git a/nixos/tests/user-expiry.nix b/nixos/tests/user-expiry.nix
new file mode 100644
index 000000000000..bcaed7a0ccb0
--- /dev/null
+++ b/nixos/tests/user-expiry.nix
@@ -0,0 +1,70 @@
+let
+  alice = "alice";
+  bob = "bob";
+  eve = "eve";
+  passwd = "pass1";
+in
+{
+  name = "user-expiry";
+
+  nodes = {
+    machine = {
+      users.users = {
+        ${alice} = {
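+          # Expiry date in the past: logins must be rejected.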
+          initialPassword = passwd;
+          isNormalUser = true;
+          expires = "1990-01-01";
+        };
+        ${bob} = {
+          initialPassword = passwd;
+          isNormalUser = true;
+          expires = "2990-01-01";
+        };
+        ${eve} = {
+          initialPassword = passwd;
+          isNormalUser = true;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    def switch_to_tty(tty_number):
+      machine.fail(f"pgrep -f 'agetty.*tty{tty_number}'")
+      machine.send_key(f"alt-f{tty_number}")
+      machine.wait_until_succeeds(f"[ $(fgconsole) = {tty_number} ]")
+      machine.wait_for_unit(f"getty@tty{tty_number}.service")
+      machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{tty_number}'")
+
+
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("getty@tty1.service")
+
+    with subtest("${alice} cannot login"):
+      machine.wait_until_tty_matches("1", "login: ")
+      machine.send_chars("${alice}\n")
+      machine.wait_until_tty_matches("1", "Password: ")
+      machine.send_chars("${passwd}\n")
+
+      machine.wait_until_succeeds("journalctl --grep='account ${alice} has expired \\(account expired\\)'")
+      machine.wait_until_tty_matches("1", "login: ")
+
+    with subtest("${bob} can login"):
+      switch_to_tty(2)
+      machine.wait_until_tty_matches("2", "login: ")
+      machine.send_chars("${bob}\n")
+      machine.wait_until_tty_matches("2", "Password: ")
+      machine.send_chars("${passwd}\n")
+
+      machine.wait_until_succeeds("pgrep -u ${bob} bash")
+
+    with subtest("${eve} can login"):
+      switch_to_tty(3)
+      machine.wait_until_tty_matches("3", "login: ")
+      machine.send_chars("${eve}\n")
+      machine.wait_until_tty_matches("3", "Password: ")
+      machine.send_chars("${passwd}\n")
+
+      machine.wait_until_succeeds("pgrep -u ${eve} bash")
+  '';
+}
diff --git a/nixos/tests/wordpress.nix b/nixos/tests/wordpress.nix
index 4e322774fef5..106bbff46c54 100644
--- a/nixos/tests/wordpress.nix
+++ b/nixos/tests/wordpress.nix
@@ -67,7 +67,7 @@ rec {
       networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
     };
   }) {} [
-    "6_1" "6_2"
+    "6_1" "6_2" "6_3"
   ];
 
   testScript = ''
diff --git a/nixos/tests/wrappers.nix b/nixos/tests/wrappers.nix
index 391e9b42b45b..1d4fa85d7399 100644
--- a/nixos/tests/wrappers.nix
+++ b/nixos/tests/wrappers.nix
@@ -21,6 +21,8 @@ in
       };
     };
 
+    security.apparmor.enable = true;
+
     security.wrappers = {
       suidRoot = {
         owner = "root";
@@ -84,17 +86,27 @@ in
       test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -g', '0')
       test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -rg', '0')
 
+      # Test that in a no-new-privs environment the wrappers simply exec their target.
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -u', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -ru', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -g', '${toString usersGid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -rg', '${toString usersGid}')
+
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -u', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -ru', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -g', '${toString usersGid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -rg', '${toString usersGid}')
+
       # We are only testing the permitted set, because it's easiest to look at with capsh.
       machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_CHOWN'))
       machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_SYS_ADMIN'))
       machine.succeed(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_CHOWN'))
       machine.fail(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_SYS_ADMIN'))
 
-      # test a few "attacks" against which the wrapper protects itself
-      machine.succeed("cp /run/wrappers/bin/suid_root_busybox{,.real} /tmp/")
-      machine.fail(cmd_as_regular("/tmp/suid_root_busybox id -u"))
-
-      machine.succeed("chmod u+s,a+w /run/wrappers/bin/suid_root_busybox")
-      machine.fail(cmd_as_regular("/run/wrappers/bin/suid_root_busybox id -u"))
+      # Test that the only consumer of the apparmor policy includes generated by
+      # the wrappers module still works. Ideally this would live in a test for the
+      # module that actually creates the apparmor policy for ping, but there is no
+      # convenient test for that one.
+      machine.succeed("ping -c 1 127.0.0.1")
     '';
 })
diff --git a/nixos/tests/zfs.nix b/nixos/tests/zfs.nix
index 8e52e0065745..800f5e43cd15 100644
--- a/nixos/tests/zfs.nix
+++ b/nixos/tests/zfs.nix
@@ -113,6 +113,8 @@ let
       };
 
       testScript = ''
+        # TODO: Remove this when upgrading stable to zfs 2.2.0
+        unstable = ${if enableUnstable then "True" else "False"};
         machine.wait_for_unit("multi-user.target")
         machine.succeed(
             "zpool status",
@@ -133,9 +135,10 @@ let
             )
             machine.crash()
             machine.wait_for_unit("multi-user.target")
+            machine.succeed("zfs set sharesmb=on rpool/shared_smb")
+            if not unstable:
+                machine.succeed("zfs share rpool/shared_smb")
             machine.succeed(
-                "zfs set sharesmb=on rpool/shared_smb",
-                "zfs share rpool/shared_smb",
                 "smbclient -gNL localhost | grep rpool_shared_smb",
                 "umount /tmp/mnt",
                 "zpool destroy rpool",