author     Alyssa Ross <hi@alyssa.is>  2024-04-10 20:43:08 +0200
committer  Alyssa Ross <hi@alyssa.is>  2024-04-10 20:43:08 +0200
commit     69bfdf2484041b9d242840c4e5017b4703383bb0 (patch)
tree       d8bdaa69e7990d7d6f09b594b3c425f742acd2d0 /nixpkgs/nixos/modules
parent     c8aee4b4363b6bf905a521b05b7476960e8286c8 (diff)
parent     d8fe5e6c92d0d190646fb9f1056741a229980089 (diff)
Merge commit 'd8fe5e6c'
Conflicts:
	nixpkgs/pkgs/build-support/go/module.nix
Diffstat (limited to 'nixpkgs/nixos/modules')
-rw-r--r--  nixpkgs/nixos/modules/config/users-groups.nix | 36
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/nvidia.nix | 13
-rw-r--r--  nixpkgs/nixos/modules/i18n/input-method/default.md | 70
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl | 2
-rw-r--r--  nixpkgs/nixos/modules/module-list.nix | 10
-rw-r--r--  nixpkgs/nixos/modules/profiles/all-hardware.nix | 9
-rw-r--r--  nixpkgs/nixos/modules/profiles/macos-builder.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/programs/digitalbitbox/default.md | 36
-rw-r--r--  nixpkgs/nixos/modules/programs/goldwarden.nix | 50
-rw-r--r--  nixpkgs/nixos/modules/programs/nix-ld.nix | 42
-rw-r--r--  nixpkgs/nixos/modules/programs/plotinus.md | 6
-rw-r--r--  nixpkgs/nixos/modules/programs/steam.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md | 8
-rw-r--r--  nixpkgs/nixos/modules/security/acme/default.md | 366
-rw-r--r--  nixpkgs/nixos/modules/services/audio/castopod.md | 20
-rw-r--r--  nixpkgs/nixos/modules/services/audio/roon-server.nix | 3
-rw-r--r--  nixpkgs/nixos/modules/services/backup/borgbackup.md | 31
-rw-r--r--  nixpkgs/nixos/modules/services/databases/foundationdb.md | 20
-rw-r--r--  nixpkgs/nixos/modules/services/databases/postgresql.md | 42
-rw-r--r--  nixpkgs/nixos/modules/services/databases/tigerbeetle.md | 8
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/flatpak.md | 8
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/flatpak.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix | 145
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix | 83
-rw-r--r--  nixpkgs/nixos/modules/services/development/athens.md | 8
-rw-r--r--  nixpkgs/nixos/modules/services/development/blackfire.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/development/livebook.md | 8
-rw-r--r--  nixpkgs/nixos/modules/services/editors/emacs.md | 22
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/mail/mailman.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/matrix/maubot.md | 90
-rw-r--r--  nixpkgs/nixos/modules/services/matrix/mjolnir.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/matrix/synapse.md | 6
-rw-r--r--  nixpkgs/nixos/modules/services/misc/anki-sync-server.md | 6
-rw-r--r--  nixpkgs/nixos/modules/services/misc/forgejo.md | 40
-rw-r--r--  nixpkgs/nixos/modules/services/misc/gitlab.md | 82
-rw-r--r--  nixpkgs/nixos/modules/services/misc/mollysocket.nix | 133
-rw-r--r--  nixpkgs/nixos/modules/services/misc/paperless.nix | 16
-rw-r--r--  nixpkgs/nixos/modules/services/misc/sourcehut/default.md | 8
-rw-r--r--  nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix | 6
-rw-r--r--  nixpkgs/nixos/modules/services/misc/weechat.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/certspotter.md | 48
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/goss.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/parsedmarc.md | 124
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md | 14
-rw-r--r--  nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/dnsproxy.nix | 106
-rw-r--r--  nixpkgs/nixos/modules/services/networking/firefox-syncserver.md | 24
-rw-r--r--  nixpkgs/nixos/modules/services/networking/microsocks.nix | 146
-rw-r--r--  nixpkgs/nixos/modules/services/networking/mosquitto.md | 73
-rw-r--r--  nixpkgs/nixos/modules/services/networking/netbird.md | 26
-rw-r--r--  nixpkgs/nixos/modules/services/networking/pleroma.md | 196
-rw-r--r--  nixpkgs/nixos/modules/services/networking/prosody.md | 60
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-control.nix | 69
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix | 64
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix | 74
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-router.nix | 49
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion.nix | 39
-rw-r--r--  nixpkgs/nixos/modules/services/networking/yggdrasil.md | 6
-rw-r--r--  nixpkgs/nixos/modules/services/search/meilisearch.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix | 35
-rw-r--r--  nixpkgs/nixos/modules/services/security/usbguard.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/video/photonvision.nix | 64
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/akkoma.md | 284
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/discourse.md | 228
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/gotosocial.md | 52
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/grocy.md | 6
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/kavita.nix | 63
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/keycloak.md | 24
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/lemmy.md | 14
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/nextcloud.md | 6
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/peertube.nix | 292
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/pict-rs.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/plausible.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/pretix.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/slskd.nix | 326
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/suwayomi-server.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/garage.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md | 110
-rw-r--r--  nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md | 37
-rw-r--r--  nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/x11/xserver.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/system/boot/clevis.md | 12
-rw-r--r--  nixpkgs/nixos/modules/system/boot/initrd-ssh.nix | 30
-rw-r--r--  nixpkgs/nixos/modules/system/boot/plymouth.nix | 6
-rw-r--r--  nixpkgs/nixos/modules/system/boot/systemd/initrd.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/envfs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/incus.nix | 165
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix | 120
91 files changed, 2933 insertions, 1563 deletions
diff --git a/nixpkgs/nixos/modules/config/users-groups.nix b/nixpkgs/nixos/modules/config/users-groups.nix
index 02cd1a17f538..f9750b7263ca 100644
--- a/nixpkgs/nixos/modules/config/users-groups.nix
+++ b/nixpkgs/nixos/modules/config/users-groups.nix
@@ -496,6 +496,7 @@ let
     in
       filter types.shellPackage.check shells;
 
+  lingeringUsers = map (u: u.name) (attrValues (flip filterAttrs cfg.users (n: u: u.linger)));
 in {
   imports = [
     (mkAliasOptionModuleMD [ "users" "extraUsers" ] [ "users" "users" ])
@@ -695,24 +696,31 @@ in {
       '';
     } else ""; # keep around for backwards compatibility
 
-    system.activationScripts.update-lingering = let
-      lingerDir = "/var/lib/systemd/linger";
-      lingeringUsers = map (u: u.name) (attrValues (flip filterAttrs cfg.users (n: u: u.linger)));
-      lingeringUsersFile = builtins.toFile "lingering-users"
-        (concatStrings (map (s: "${s}\n")
-          (sort (a: b: a < b) lingeringUsers)));  # this sorting is important for `comm` to work correctly
-    in stringAfter [ "users" ] ''
-      if [ -e ${lingerDir} ] ; then
+    systemd.services.linger-users = lib.mkIf ((builtins.length lingeringUsers) > 0) {
+      wantedBy = ["multi-user.target"];
+      after = ["systemd-logind.service"];
+      requires = ["systemd-logind.service"];
+
+      script = let
+        lingerDir = "/var/lib/systemd/linger";
+        lingeringUsersFile = builtins.toFile "lingering-users"
+          (concatStrings (map (s: "${s}\n")
+            (sort (a: b: a < b) lingeringUsers)));  # this sorting is important for `comm` to work correctly
+      in ''
+        mkdir -vp ${lingerDir}
         cd ${lingerDir}
-        for user in ${lingerDir}/*; do
-          if ! id "$user" >/dev/null 2>&1; then
+        for user in $(ls); do
+          if ! id "$user" >/dev/null; then
+            echo "Removing linger for missing user $user"
             rm --force -- "$user"
           fi
         done
-        ls ${lingerDir} | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
-        ls ${lingerDir} | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl  enable-linger
-      fi
-    '';
+        ls | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
+        ls | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl  enable-linger
+      '';
+
+      serviceConfig.Type = "oneshot";
+    };
 
     # Warn about user accounts with deprecated password hashing schemes
     # This does not work when the users and groups are created by
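For context, the option this new `linger-users` oneshot reconciles is `users.users.<name>.linger`. A minimal, hypothetical configuration that would make it run `loginctl enable-linger` (the user name "alice" and `isNormalUser` are illustrative, not part of the hunk):

```nix
{
  # Hypothetical lingering user; picked up by `lingeringUsers` above and
  # fed through `comm` to `loginctl enable-linger` by the oneshot at boot.
  users.users.alice = {
    isNormalUser = true;
    linger = true;
  };
}
```

Moving the logic from an activation script into a unit ordered after (and requiring) `systemd-logind.service` means `loginctl` is only invoked once logind is actually available.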
diff --git a/nixpkgs/nixos/modules/hardware/video/nvidia.nix b/nixpkgs/nixos/modules/hardware/video/nvidia.nix
index 3b983f768f91..352c8d8ead54 100644
--- a/nixpkgs/nixos/modules/hardware/video/nvidia.nix
+++ b/nixpkgs/nixos/modules/hardware/video/nvidia.nix
@@ -396,6 +396,9 @@ in {
             modules = [nvidia_x11.bin];
             display = !offloadCfg.enable;
             deviceSection =
+              ''
+                Option "SidebandSocketPath" "/run/nvidia-xdriver/"
+              '' +
               lib.optionalString primeEnabled
               ''
                 BusID "${pCfg.nvidiaBusId}"
@@ -533,8 +536,14 @@ in {
 
         hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
 
-        systemd.tmpfiles.rules =
-          lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+        systemd.tmpfiles.rules = [
+          # Remove the following log message:
+          #    (WW) NVIDIA: Failed to bind sideband socket to
+          #    (WW) NVIDIA:     '/var/run/nvidia-xdriver-b4f69129' Permission denied
+          #
+          # https://bbs.archlinux.org/viewtopic.php?pid=1909115#p1909115
+          "d /run/nvidia-xdriver 0770 root users"
+        ] ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
           "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
 
         boot = {
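For reference, a standalone sketch of the tmpfiles rule the hunk above prepends to the module's list; `d` creates the directory at boot with the given mode and ownership so the X driver can bind its sideband socket there:

```nix
{
  # Equivalent to the entry added above: create /run/nvidia-xdriver at boot,
  # mode 0770, owned by root:users, avoiding the "Failed to bind sideband
  # socket ... Permission denied" warning referenced in the comment.
  systemd.tmpfiles.rules = [ "d /run/nvidia-xdriver 0770 root users" ];
}
```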
diff --git a/nixpkgs/nixos/modules/i18n/input-method/default.md b/nixpkgs/nixos/modules/i18n/input-method/default.md
index 42cb8a8d7b6a..6d12462b788e 100644
--- a/nixpkgs/nixos/modules/i18n/input-method/default.md
+++ b/nixpkgs/nixos/modules/i18n/input-method/default.md
@@ -22,11 +22,13 @@ friendly input method user interface.
 
 The following snippet can be used to configure IBus:
 
-```
-i18n.inputMethod = {
-  enabled = "ibus";
-  ibus.engines = with pkgs.ibus-engines; [ anthy hangul mozc ];
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "ibus";
+    ibus.engines = with pkgs.ibus-engines; [ anthy hangul mozc ];
+  };
+}
 ```
 
 `i18n.inputMethod.ibus.engines` is optional and can be used
@@ -48,8 +50,10 @@ Available extra IBus engines are:
     methods, it must appear in the list of engines along with
     `table`. For example:
 
-    ```
-    ibus.engines = with pkgs.ibus-engines; [ table table-others ];
+    ```nix
+    {
+      ibus.engines = with pkgs.ibus-engines; [ table table-others ];
+    }
     ```
 
 To use any input method, the package must be added in the configuration, as
@@ -74,11 +78,13 @@ built-in Input Method Engine, Pinyin, QuWei and Table-based input methods.
 
 The following snippet can be used to configure Fcitx:
 
-```
-i18n.inputMethod = {
-  enabled = "fcitx5";
-  fcitx5.addons = with pkgs; [ fcitx5-mozc fcitx5-hangul fcitx5-m17n ];
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "fcitx5";
+    fcitx5.addons = with pkgs; [ fcitx5-mozc fcitx5-hangul fcitx5-m17n ];
+  };
+}
 ```
 
 `i18n.inputMethod.fcitx5.addons` is optional and can be
@@ -110,10 +116,12 @@ phonetic Korean characters (hangul) and pictographic Korean characters
 
 The following snippet can be used to configure Nabi:
 
-```
-i18n.inputMethod = {
-  enabled = "nabi";
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "nabi";
+  };
+}
 ```
 
 ## Uim {#module-services-input-methods-uim}
@@ -123,10 +131,12 @@ framework. Applications can use it through so-called bridges.
 
 The following snippet can be used to configure uim:
 
-```
-i18n.inputMethod = {
-  enabled = "uim";
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "uim";
+  };
+}
 ```
 
 Note: The [](#opt-i18n.inputMethod.uim.toolbar) option can be
@@ -141,10 +151,12 @@ etc...
 
 The following snippet can be used to configure Hime:
 
-```
-i18n.inputMethod = {
-  enabled = "hime";
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "hime";
+  };
+}
 ```
 
 ## Kime {#module-services-input-methods-kime}
@@ -153,8 +165,10 @@ Kime is Korean IME. it's built with Rust language and let you get simple, safe,
 
 The following snippet can be used to configure Kime:
 
-```
-i18n.inputMethod = {
-  enabled = "kime";
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "kime";
+  };
+}
 ```
diff --git a/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl b/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl
index 2f9edba4f0c9..ef25b8b296e6 100644
--- a/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl
+++ b/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl
@@ -257,7 +257,7 @@ foreach my $path (glob "/sys/class/{block,mmc_host}/*") {
 
 # Add bcache module, if needed.
 my @bcacheDevices = glob("/dev/bcache*");
-@bcacheDevices = grep(!qr#dev/bcachefs.*#, @bcacheDevices);
+@bcacheDevices = grep(!m#dev/bcachefs.*#, @bcacheDevices);
 if (scalar @bcacheDevices > 0) {
     push @initrdAvailableKernelModules, "bcache";
 }
diff --git a/nixpkgs/nixos/modules/module-list.nix b/nixpkgs/nixos/modules/module-list.nix
index a103869c16aa..2ee646291e3b 100644
--- a/nixpkgs/nixos/modules/module-list.nix
+++ b/nixpkgs/nixos/modules/module-list.nix
@@ -193,6 +193,7 @@
   ./programs/gnome-disks.nix
   ./programs/gnome-terminal.nix
   ./programs/gnupg.nix
+  ./programs/goldwarden.nix
   ./programs/gpaste.nix
   ./programs/gphoto2.nix
   ./programs/haguichi.nix
@@ -728,6 +729,7 @@
   ./services/misc/mbpfan.nix
   ./services/misc/mediatomb.nix
   ./services/misc/metabase.nix
+  ./services/misc/mollysocket.nix
   ./services/misc/moonraker.nix
   ./services/misc/mqtt2influxdb.nix
   ./services/misc/n8n.nix
@@ -945,6 +947,7 @@
   ./services/networking/dnscrypt-wrapper.nix
   ./services/networking/dnsdist.nix
   ./services/networking/dnsmasq.nix
+  ./services/networking/dnsproxy.nix
   ./services/networking/doh-proxy-rust.nix
   ./services/networking/ejabberd.nix
   ./services/networking/envoy.nix
@@ -1020,6 +1023,7 @@
   ./services/networking/lxd-image-server.nix
   ./services/networking/magic-wormhole-mailbox-server.nix
   ./services/networking/matterbridge.nix
+  ./services/networking/microsocks.nix
   ./services/networking/mihomo.nix
   ./services/networking/minidlna.nix
   ./services/networking/miniupnpd.nix
@@ -1106,6 +1110,11 @@
   ./services/networking/rpcbind.nix
   ./services/networking/rxe.nix
   ./services/networking/sabnzbd.nix
+  ./services/networking/scion/scion.nix
+  ./services/networking/scion/scion-control.nix
+  ./services/networking/scion/scion-daemon.nix
+  ./services/networking/scion/scion-dispatcher.nix
+  ./services/networking/scion/scion-router.nix
   ./services/networking/seafile.nix
   ./services/networking/searx.nix
   ./services/networking/shadowsocks.nix
@@ -1274,6 +1283,7 @@
   ./services/video/go2rtc/default.nix
   ./services/video/frigate.nix
   ./services/video/mirakurun.nix
+  ./services/video/photonvision.nix
   ./services/video/replay-sorcery.nix
   ./services/video/mediamtx.nix
   ./services/video/unifi-video.nix
diff --git a/nixpkgs/nixos/modules/profiles/all-hardware.nix b/nixpkgs/nixos/modules/profiles/all-hardware.nix
index 4857ea4dbeae..249b767593f2 100644
--- a/nixpkgs/nixos/modules/profiles/all-hardware.nix
+++ b/nixpkgs/nixos/modules/profiles/all-hardware.nix
@@ -58,15 +58,7 @@ in
       # Hyper-V support.
       "hv_storvsc"
     ] ++ lib.optionals pkgs.stdenv.hostPlatform.isAarch [
-      # Most of the following falls into two categories:
-      #  - early KMS / early display
-      #  - early storage (e.g. USB) support
-
-      # Allows using framebuffer configured by the initial boot firmware
-      "simplefb"
-
       # Allwinner support
-
       # Required for early KMS
       "sun4i-drm"
       "sun8i-mixer" # Audio, but required for kms
@@ -75,7 +67,6 @@ in
       "pwm-sun4i"
 
       # Broadcom
-
       "vc4"
     ] ++ lib.optionals pkgs.stdenv.isAarch64 [
       # Most of the following falls into two categories:
diff --git a/nixpkgs/nixos/modules/profiles/macos-builder.nix b/nixpkgs/nixos/modules/profiles/macos-builder.nix
index 6c2602881d6b..786e26cf98f7 100644
--- a/nixpkgs/nixos/modules/profiles/macos-builder.nix
+++ b/nixpkgs/nixos/modules/profiles/macos-builder.nix
@@ -145,6 +145,8 @@ in
         # This installCredentials script is written so that it's as easy as
         # possible for a user to audit before confirming the `sudo`
         installCredentials = hostPkgs.writeShellScript "install-credentials" ''
+          set -euo pipefail
+
           KEYS="''${1}"
           INSTALL=${hostPkgs.coreutils}/bin/install
           "''${INSTALL}" -g nixbld -m 600 "''${KEYS}/${user}_${keyType}" ${privateKey}
@@ -154,6 +156,9 @@ in
         hostPkgs = config.virtualisation.host.pkgs;
 
         script = hostPkgs.writeShellScriptBin "create-builder" (
+          ''
+            set -euo pipefail
+          '' +
           # When running as non-interactively as part of a DarwinConfiguration the working directory
           # must be set to a writeable directory.
         (if cfg.workingDirectory != "." then ''
diff --git a/nixpkgs/nixos/modules/programs/digitalbitbox/default.md b/nixpkgs/nixos/modules/programs/digitalbitbox/default.md
index 9bca14e97ffe..5147bb971e3a 100644
--- a/nixpkgs/nixos/modules/programs/digitalbitbox/default.md
+++ b/nixpkgs/nixos/modules/programs/digitalbitbox/default.md
@@ -4,8 +4,10 @@ Digital Bitbox is a hardware wallet and second-factor authenticator.
 
 The `digitalbitbox` programs module may be installed by setting
 `programs.digitalbitbox` to `true` in a manner similar to
-```
-programs.digitalbitbox.enable = true;
+```nix
+{
+  programs.digitalbitbox.enable = true;
+}
 ```
 and bundles the `digitalbitbox` package (see [](#sec-digitalbitbox-package)),
 which contains the `dbb-app` and `dbb-cli` binaries, along with the hardware
@@ -21,27 +23,33 @@ For more information, see <https://digitalbitbox.com/start_linux>.
 
 The binaries, `dbb-app` (a GUI tool) and `dbb-cli` (a CLI tool), are available
 through the `digitalbitbox` package which could be installed as follows:
-```
-environment.systemPackages = [
-  pkgs.digitalbitbox
-];
+```nix
+{
+  environment.systemPackages = [
+    pkgs.digitalbitbox
+  ];
+}
 ```
 
 ## Hardware {#sec-digitalbitbox-hardware-module}
 
 The digitalbitbox hardware package enables the udev rules for Digital Bitbox
 devices and may be installed as follows:
-```
-hardware.digitalbitbox.enable = true;
+```nix
+{
+  hardware.digitalbitbox.enable = true;
+}
 ```
 
 In order to alter the udev rules, one may provide different values for the
 `udevRule51` and `udevRule52` attributes by means of overriding as follows:
-```
-programs.digitalbitbox = {
-  enable = true;
-  package = pkgs.digitalbitbox.override {
-    udevRule51 = "something else";
+```nix
+{
+  programs.digitalbitbox = {
+    enable = true;
+    package = pkgs.digitalbitbox.override {
+      udevRule51 = "something else";
+    };
   };
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/programs/goldwarden.nix b/nixpkgs/nixos/modules/programs/goldwarden.nix
new file mode 100644
index 000000000000..26f9a87c1986
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/goldwarden.nix
@@ -0,0 +1,50 @@
+{ lib, config, pkgs, ... }:
+let
+  cfg = config.programs.goldwarden;
+in
+{
+  options.programs.goldwarden = {
+    enable = lib.mkEnableOption "Goldwarden";
+    package = lib.mkPackageOption pkgs "goldwarden" {};
+    useSshAgent = lib.mkEnableOption "Goldwarden's SSH Agent" // { default = true; };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [{
+       assertion = cfg.useSshAgent -> !config.programs.ssh.startAgent;
+       message = "Only one ssh-agent can be used at a time.";
+    }];
+
+    environment = {
+      etc = lib.mkIf config.programs.chromium.enable {
+        "chromium/native-messaging-hosts/com.8bit.bitwarden.json".source = "${cfg.package}/etc/chromium/native-messaging-hosts/com.8bit.bitwarden.json";
+        "opt/chrome/native-messaging-hosts/com.8bit.bitwarden.json".source = "${cfg.package}/etc/chrome/native-messaging-hosts/com.8bit.bitwarden.json";
+      };
+
+      extraInit = lib.mkIf cfg.useSshAgent ''
+        if [ -z "$SSH_AUTH_SOCK" -a -n "$HOME" ]; then
+          export SSH_AUTH_SOCK="$HOME/.goldwarden-ssh-agent.sock"
+        fi
+      '';
+
+      systemPackages = [
+        # for cli and polkit action
+        cfg.package
+        # binary exec's into pinentry which should match the DE
+        config.programs.gnupg.agent.pinentryPackage
+      ];
+    };
+
+    programs.firefox.nativeMessagingHosts.packages = [ cfg.package ];
+
+    # see https://github.com/quexten/goldwarden/blob/main/cmd/goldwarden.service
+    systemd.user.services.goldwarden = {
+      description = "Goldwarden daemon";
+      wantedBy = [ "graphical-session.target" ];
+      after = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = "${lib.getExe cfg.package} daemonize";
+      path = [ config.programs.gnupg.agent.pinentryPackage ];
+      unitConfig.ConditionUser = "!@system";
+    };
+  };
+}
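A minimal sketch of how the new module might be enabled, assuming the default SSH agent integration is wanted and no other ssh-agent owns `SSH_AUTH_SOCK`:

```nix
{
  # Enables the Goldwarden user service, CLI and browser
  # native-messaging hosts defined in the module above.
  programs.goldwarden.enable = true;

  # useSshAgent defaults to true; the module asserts that
  # programs.ssh.startAgent is disabled in that case, so keep it off.
  programs.ssh.startAgent = false;
}
```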
diff --git a/nixpkgs/nixos/modules/programs/nix-ld.nix b/nixpkgs/nixos/modules/programs/nix-ld.nix
index 6f36ce33640c..b095437733cc 100644
--- a/nixpkgs/nixos/modules/programs/nix-ld.nix
+++ b/nixpkgs/nixos/modules/programs/nix-ld.nix
@@ -3,7 +3,7 @@ let
   cfg = config.programs.nix-ld;
 
   nix-ld-libraries = pkgs.buildEnv {
-    name = "lb-library-path";
+    name = "ld-library-path";
     pathsToLink = [ "/lib" ];
     paths = map lib.getLib cfg.libraries;
     # TODO make glibc here configurable?
@@ -13,25 +13,6 @@ let
     extraPrefix = "/share/nix-ld";
     ignoreCollisions = true;
   };
-
-  # We currently take all libraries from systemd and nix as the default.
-  # Is there a better list?
-  baseLibraries = with pkgs; [
-    zlib
-    zstd
-    stdenv.cc.cc
-    curl
-    openssl
-    attr
-    libssh
-    bzip2
-    libxml2
-    acl
-    libsodium
-    util-linux
-    xz
-    systemd
-  ];
 in
 {
   meta.maintainers = [ lib.maintainers.mic92 ];
@@ -41,7 +22,7 @@ in
     libraries = lib.mkOption {
       type = lib.types.listOf lib.types.package;
       description = lib.mdDoc "Libraries that automatically become available to all programs. The default set includes common libraries.";
-      default = baseLibraries;
+      default = [ ];
       defaultText = lib.literalExpression "baseLibraries derived from systemd and nix dependencies.";
     };
   };
@@ -57,5 +38,24 @@ in
       NIX_LD = "/run/current-system/sw/share/nix-ld/lib/ld.so";
       NIX_LD_LIBRARY_PATH = "/run/current-system/sw/share/nix-ld/lib";
     };
+
+    # We currently take all libraries from systemd and nix as the default.
+    # Is there a better list?
+    programs.nix-ld.libraries = with pkgs; [
+      zlib
+      zstd
+      stdenv.cc.cc
+      curl
+      openssl
+      attr
+      libssh
+      bzip2
+      libxml2
+      acl
+      libsodium
+      util-linux
+      xz
+      systemd
+    ];
   };
 }
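With this change the base library list is defined in module config rather than as the option default, so user additions merge with it instead of replacing it. A hedged usage sketch (the extra library `libGL` is only an example):

```nix
{
  programs.nix-ld.enable = true;
  # Appended to the systemd/nix-derived base set defined above rather than
  # overriding it, because the option's default is now [ ].
  programs.nix-ld.libraries = with pkgs; [ libGL ];
}
```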
diff --git a/nixpkgs/nixos/modules/programs/plotinus.md b/nixpkgs/nixos/modules/programs/plotinus.md
index fac3bbad1e08..0a2c688c722c 100644
--- a/nixpkgs/nixos/modules/programs/plotinus.md
+++ b/nixpkgs/nixos/modules/programs/plotinus.md
 palette provides a searchable list of all menu items in the application.
 
 To enable Plotinus, add the following to your
 {file}`configuration.nix`:
-```
-programs.plotinus.enable = true;
+```nix
+{
+  programs.plotinus.enable = true;
+}
 ```
diff --git a/nixpkgs/nixos/modules/programs/steam.nix b/nixpkgs/nixos/modules/programs/steam.nix
index c93a34f61849..bab9bf8107b6 100644
--- a/nixpkgs/nixos/modules/programs/steam.nix
+++ b/nixpkgs/nixos/modules/programs/steam.nix
@@ -45,6 +45,8 @@ in {
       apply = steam: steam.override (prev: {
         extraEnv = (lib.optionalAttrs (cfg.extraCompatPackages != [ ]) {
           STEAM_EXTRA_COMPAT_TOOLS_PATHS = makeSearchPathOutput "steamcompattool" "" cfg.extraCompatPackages;
+        }) // (optionalAttrs cfg.extest.enable {
+          LD_PRELOAD = "${pkgs.pkgsi686Linux.extest}/lib/libextest.so";
         }) // (prev.extraEnv or {});
         extraLibraries = pkgs: let
           prevLibs = if prev ? extraLibraries then prev.extraLibraries pkgs else [ ];
@@ -59,8 +61,6 @@ in {
           # use the setuid wrapped bubblewrap
           bubblewrap = "${config.security.wrapperDir}/..";
         };
-      } // optionalAttrs cfg.extest.enable {
-        extraEnv.LD_PRELOAD = "${pkgs.pkgsi686Linux.extest}/lib/libextest.so";
       });
       description = lib.mdDoc ''
         The Steam package to use. Additional libraries are added from the system
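The hunk above folds the extest `LD_PRELOAD` into the same `extraEnv` merge as the compat-tool paths, so both settings survive together. A minimal, hypothetical configuration exercising it:

```nix
{
  programs.steam.enable = true;
  # Sets LD_PRELOAD to the 32-bit libextest.so via the merged extraEnv above.
  programs.steam.extest.enable = true;
}
```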
diff --git a/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md b/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md
index 6a310006edbf..7e4a41641eea 100644
--- a/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md
+++ b/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md
@@ -9,7 +9,7 @@ prompt themes.
 The module uses the `oh-my-zsh` package with all available
 features. The initial setup using Nix expressions is fairly similar to the
 configuration format of `oh-my-zsh`.
-```
+```nix
 {
   programs.zsh.ohMyZsh = {
     enable = true;
@@ -33,7 +33,7 @@ environment variable for this which points to a directory with additional
 scripts.
 
 The module can do this as well:
-```
+```nix
 {
   programs.zsh.ohMyZsh.custom = "~/path/to/custom/scripts";
 }
@@ -48,7 +48,7 @@ which bundles completion scripts and a plugin for `oh-my-zsh`.
 
 Rather than using a single mutable path for `ZSH_CUSTOM`,
 it's also possible to generate this path from a list of Nix packages:
-```
+```nix
 { pkgs, ... }:
 {
   programs.zsh.ohMyZsh.customPkgs = [
@@ -89,7 +89,7 @@ If third-party customizations (e.g. new themes) are supposed to be added to
     [upstream repo.](https://github.com/robbyrussell/oh-my-zsh/tree/91b771914bc7c43dd7c7a43b586c5de2c225ceb7/plugins)
 
 A derivation for `oh-my-zsh` may look like this:
-```
+```nix
 { stdenv, fetchFromGitHub }:
 
 stdenv.mkDerivation rec {
diff --git a/nixpkgs/nixos/modules/security/acme/default.md b/nixpkgs/nixos/modules/security/acme/default.md
index 38fbfbf0caec..a6ef2a3fdf18 100644
--- a/nixpkgs/nixos/modules/security/acme/default.md
+++ b/nixpkgs/nixos/modules/security/acme/default.md
@@ -46,33 +46,35 @@ certs are overwritten when the ACME certs arrive. For
 `foo.example.com` the config would look like this:
 
 ```nix
-security.acme.acceptTerms = true;
-security.acme.defaults.email = "admin+acme@example.com";
-services.nginx = {
-  enable = true;
-  virtualHosts = {
-    "foo.example.com" = {
-      forceSSL = true;
-      enableACME = true;
-      # All serverAliases will be added as extra domain names on the certificate.
-      serverAliases = [ "bar.example.com" ];
-      locations."/" = {
-        root = "/var/www";
+{
+  security.acme.acceptTerms = true;
+  security.acme.defaults.email = "admin+acme@example.com";
+  services.nginx = {
+    enable = true;
+    virtualHosts = {
+      "foo.example.com" = {
+        forceSSL = true;
+        enableACME = true;
+        # All serverAliases will be added as extra domain names on the certificate.
+        serverAliases = [ "bar.example.com" ];
+        locations."/" = {
+          root = "/var/www";
+        };
       };
-    };
 
-    # We can also add a different vhost and reuse the same certificate
-    # but we have to append extraDomainNames manually beforehand:
-    # security.acme.certs."foo.example.com".extraDomainNames = [ "baz.example.com" ];
-    "baz.example.com" = {
-      forceSSL = true;
-      useACMEHost = "foo.example.com";
-      locations."/" = {
-        root = "/var/www";
+      # We can also add a different vhost and reuse the same certificate
+      # but we have to append extraDomainNames manually beforehand:
+      # security.acme.certs."foo.example.com".extraDomainNames = [ "baz.example.com" ];
+      "baz.example.com" = {
+        forceSSL = true;
+        useACMEHost = "foo.example.com";
+        locations."/" = {
+          root = "/var/www";
+        };
       };
     };
   };
-};
+}
 ```
 
 ## Using ACME certificates in Apache/httpd {#module-security-acme-httpd}
@@ -89,65 +91,69 @@ the intent that you will generate certs for all your vhosts and redirect
 everyone to HTTPS.
 
 ```nix
-security.acme.acceptTerms = true;
-security.acme.defaults.email = "admin+acme@example.com";
-
-# /var/lib/acme/.challenges must be writable by the ACME user
-# and readable by the Nginx user. The easiest way to achieve
-# this is to add the Nginx user to the ACME group.
-users.users.nginx.extraGroups = [ "acme" ];
-
-services.nginx = {
-  enable = true;
-  virtualHosts = {
-    "acmechallenge.example.com" = {
-      # Catchall vhost, will redirect users to HTTPS for all vhosts
-      serverAliases = [ "*.example.com" ];
-      locations."/.well-known/acme-challenge" = {
-        root = "/var/lib/acme/.challenges";
-      };
-      locations."/" = {
-        return = "301 https://$host$request_uri";
+{
+  security.acme.acceptTerms = true;
+  security.acme.defaults.email = "admin+acme@example.com";
+
+  # /var/lib/acme/.challenges must be writable by the ACME user
+  # and readable by the Nginx user. The easiest way to achieve
+  # this is to add the Nginx user to the ACME group.
+  users.users.nginx.extraGroups = [ "acme" ];
+
+  services.nginx = {
+    enable = true;
+    virtualHosts = {
+      "acmechallenge.example.com" = {
+        # Catchall vhost, will redirect users to HTTPS for all vhosts
+        serverAliases = [ "*.example.com" ];
+        locations."/.well-known/acme-challenge" = {
+          root = "/var/lib/acme/.challenges";
+        };
+        locations."/" = {
+          return = "301 https://$host$request_uri";
+        };
       };
     };
   };
-};
-# Alternative config for Apache
-users.users.wwwrun.extraGroups = [ "acme" ];
-services.httpd = {
-  enable = true;
-  virtualHosts = {
-    "acmechallenge.example.com" = {
-      # Catchall vhost, will redirect users to HTTPS for all vhosts
-      serverAliases = [ "*.example.com" ];
-      # /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user.
-      # By default, this is the case.
-      documentRoot = "/var/lib/acme/.challenges";
-      extraConfig = ''
-        RewriteEngine On
-        RewriteCond %{HTTPS} off
-        RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC]
-        RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301]
-      '';
+  # Alternative config for Apache
+  users.users.wwwrun.extraGroups = [ "acme" ];
+  services.httpd = {
+    enable = true;
+    virtualHosts = {
+      "acmechallenge.example.com" = {
+        # Catchall vhost, will redirect users to HTTPS for all vhosts
+        serverAliases = [ "*.example.com" ];
+        # /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user.
+        # By default, this is the case.
+        documentRoot = "/var/lib/acme/.challenges";
+        extraConfig = ''
+          RewriteEngine On
+          RewriteCond %{HTTPS} off
+          RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC]
+          RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301]
+        '';
+      };
     };
   };
-};
+}
 ```
 
 Now you need to configure ACME to generate a certificate.
 
 ```nix
-security.acme.certs."foo.example.com" = {
-  webroot = "/var/lib/acme/.challenges";
-  email = "foo@example.com";
-  # Ensure that the web server you use can read the generated certs
-  # Take a look at the group option for the web server you choose.
-  group = "nginx";
-  # Since we have a wildcard vhost to handle port 80,
-  # we can generate certs for anything!
-  # Just make sure your DNS resolves them.
-  extraDomainNames = [ "mail.example.com" ];
-};
+{
+  security.acme.certs."foo.example.com" = {
+    webroot = "/var/lib/acme/.challenges";
+    email = "foo@example.com";
+    # Ensure that the web server you use can read the generated certs
+    # Take a look at the group option for the web server you choose.
+    group = "nginx";
+    # Since we have a wildcard vhost to handle port 80,
+    # we can generate certs for anything!
+    # Just make sure your DNS resolves them.
+    extraDomainNames = [ "mail.example.com" ];
+  };
+}
 ```
 
 The private key {file}`key.pem` and certificate
@@ -168,31 +174,33 @@ for provider/server specific configuration values. For the sake of these
 docs, we will provide a fully self-hosted example using bind.
 
 ```nix
-services.bind = {
-  enable = true;
-  extraConfig = ''
-    include "/var/lib/secrets/dnskeys.conf";
-  '';
-  zones = [
-    rec {
-      name = "example.com";
-      file = "/var/db/bind/${name}";
-      master = true;
-      extraConfig = "allow-update { key rfc2136key.example.com.; };";
-    }
-  ];
-};
-
-# Now we can configure ACME
-security.acme.acceptTerms = true;
-security.acme.defaults.email = "admin+acme@example.com";
-security.acme.certs."example.com" = {
-  domain = "*.example.com";
-  dnsProvider = "rfc2136";
-  environmentFile = "/var/lib/secrets/certs.secret";
-  # We don't need to wait for propagation since this is a local DNS server
-  dnsPropagationCheck = false;
-};
+{
+  services.bind = {
+    enable = true;
+    extraConfig = ''
+      include "/var/lib/secrets/dnskeys.conf";
+    '';
+    zones = [
+      rec {
+        name = "example.com";
+        file = "/var/db/bind/${name}";
+        master = true;
+        extraConfig = "allow-update { key rfc2136key.example.com.; };";
+      }
+    ];
+  };
+
+  # Now we can configure ACME
+  security.acme.acceptTerms = true;
+  security.acme.defaults.email = "admin+acme@example.com";
+  security.acme.certs."example.com" = {
+    domain = "*.example.com";
+    dnsProvider = "rfc2136";
+    environmentFile = "/var/lib/secrets/certs.secret";
+    # We don't need to wait for propagation since this is a local DNS server
+    dnsPropagationCheck = false;
+  };
+}
 ```
 
 The {file}`dnskeys.conf` and {file}`certs.secret`
@@ -200,36 +208,38 @@ must be kept secure and thus you should not keep their contents in your
 Nix config. Instead, generate them one time with a systemd service:
 
 ```nix
-systemd.services.dns-rfc2136-conf = {
-  requiredBy = ["acme-example.com.service" "bind.service"];
-  before = ["acme-example.com.service" "bind.service"];
-  unitConfig = {
-    ConditionPathExists = "!/var/lib/secrets/dnskeys.conf";
-  };
-  serviceConfig = {
-    Type = "oneshot";
-    UMask = 0077;
+{
+  systemd.services.dns-rfc2136-conf = {
+    requiredBy = ["acme-example.com.service" "bind.service"];
+    before = ["acme-example.com.service" "bind.service"];
+    unitConfig = {
+      ConditionPathExists = "!/var/lib/secrets/dnskeys.conf";
+    };
+    serviceConfig = {
+      Type = "oneshot";
+      UMask = 0077;
+    };
+    path = [ pkgs.bind ];
+    script = ''
+      mkdir -p /var/lib/secrets
+      chmod 755 /var/lib/secrets
+      tsig-keygen rfc2136key.example.com > /var/lib/secrets/dnskeys.conf
+      chown named:root /var/lib/secrets/dnskeys.conf
+      chmod 400 /var/lib/secrets/dnskeys.conf
+
+      # extract secret value from the dnskeys.conf
+      while read x y; do if [ "$x" = "secret" ]; then secret="''${y:1:''${#y}-3}"; fi; done < /var/lib/secrets/dnskeys.conf
+
+      cat > /var/lib/secrets/certs.secret << EOF
+      RFC2136_NAMESERVER='127.0.0.1:53'
+      RFC2136_TSIG_ALGORITHM='hmac-sha256.'
+      RFC2136_TSIG_KEY='rfc2136key.example.com'
+      RFC2136_TSIG_SECRET='$secret'
+      EOF
+      chmod 400 /var/lib/secrets/certs.secret
+    '';
   };
-  path = [ pkgs.bind ];
-  script = ''
-    mkdir -p /var/lib/secrets
-    chmod 755 /var/lib/secrets
-    tsig-keygen rfc2136key.example.com > /var/lib/secrets/dnskeys.conf
-    chown named:root /var/lib/secrets/dnskeys.conf
-    chmod 400 /var/lib/secrets/dnskeys.conf
-
-    # extract secret value from the dnskeys.conf
-    while read x y; do if [ "$x" = "secret" ]; then secret="''${y:1:''${#y}-3}"; fi; done < /var/lib/secrets/dnskeys.conf
-
-    cat > /var/lib/secrets/certs.secret << EOF
-    RFC2136_NAMESERVER='127.0.0.1:53'
-    RFC2136_TSIG_ALGORITHM='hmac-sha256.'
-    RFC2136_TSIG_KEY='rfc2136key.example.com'
-    RFC2136_TSIG_SECRET='$secret'
-    EOF
-    chmod 400 /var/lib/secrets/certs.secret
-  '';
-};
+}
 ```
 
 Now you're all set to generate certs! You should monitor the first invocation
@@ -251,27 +261,29 @@ you will set them as defaults
 (e.g. [](#opt-security.acme.defaults.dnsProvider)).
 
 ```nix
-# Configure ACME appropriately
-security.acme.acceptTerms = true;
-security.acme.defaults.email = "admin+acme@example.com";
-security.acme.defaults = {
-  dnsProvider = "rfc2136";
-  environmentFile = "/var/lib/secrets/certs.secret";
-  # We don't need to wait for propagation since this is a local DNS server
-  dnsPropagationCheck = false;
-};
-
-# For each virtual host you would like to use DNS-01 validation with,
-# set acmeRoot = null
-services.nginx = {
-  enable = true;
-  virtualHosts = {
-    "foo.example.com" = {
-      enableACME = true;
-      acmeRoot = null;
+{
+  # Configure ACME appropriately
+  security.acme.acceptTerms = true;
+  security.acme.defaults.email = "admin+acme@example.com";
+  security.acme.defaults = {
+    dnsProvider = "rfc2136";
+    environmentFile = "/var/lib/secrets/certs.secret";
+    # We don't need to wait for propagation since this is a local DNS server
+    dnsPropagationCheck = false;
+  };
+
+  # For each virtual host you would like to use DNS-01 validation with,
+  # set acmeRoot = null
+  services.nginx = {
+    enable = true;
+    virtualHosts = {
+      "foo.example.com" = {
+        enableACME = true;
+        acmeRoot = null;
+      };
     };
   };
-};
+}
 ```
 
 And that's it! Next time your configuration is rebuilt, or when
@@ -288,39 +300,41 @@ Below is an example configuration for OpenSMTPD, but this pattern
 can be applied to any service.
 
 ```nix
-# Configure ACME however you like (DNS or HTTP validation), adding
-# the following configuration for the relevant certificate.
-# Note: You cannot use `systemctl reload` here as that would mean
-# the LoadCredential configuration below would be skipped and
-# the service would continue to use old certificates.
-security.acme.certs."mail.example.com".postRun = ''
-  systemctl restart opensmtpd
-'';
-
-# Now you must augment OpenSMTPD's systemd service to load
-# the certificate files.
-systemd.services.opensmtpd.requires = ["acme-finished-mail.example.com.target"];
-systemd.services.opensmtpd.serviceConfig.LoadCredential = let
-  certDir = config.security.acme.certs."mail.example.com".directory;
-in [
-  "cert.pem:${certDir}/cert.pem"
-  "key.pem:${certDir}/key.pem"
-];
-
-# Finally, configure OpenSMTPD to use these certs.
-services.opensmtpd = let
-  credsDir = "/run/credentials/opensmtpd.service";
-in {
-  enable = true;
-  setSendmail = false;
-  serverConfiguration = ''
-    pki mail.example.com cert "${credsDir}/cert.pem"
-    pki mail.example.com key "${credsDir}/key.pem"
-    listen on localhost tls pki mail.example.com
-    action act1 relay host smtp://127.0.0.1:10027
-    match for local action act1
+{
+  # Configure ACME however you like (DNS or HTTP validation), adding
+  # the following configuration for the relevant certificate.
+  # Note: You cannot use `systemctl reload` here as that would mean
+  # the LoadCredential configuration below would be skipped and
+  # the service would continue to use old certificates.
+  security.acme.certs."mail.example.com".postRun = ''
+    systemctl restart opensmtpd
   '';
-};
+
+  # Now you must augment OpenSMTPD's systemd service to load
+  # the certificate files.
+  systemd.services.opensmtpd.requires = ["acme-finished-mail.example.com.target"];
+  systemd.services.opensmtpd.serviceConfig.LoadCredential = let
+    certDir = config.security.acme.certs."mail.example.com".directory;
+  in [
+    "cert.pem:${certDir}/cert.pem"
+    "key.pem:${certDir}/key.pem"
+  ];
+
+  # Finally, configure OpenSMTPD to use these certs.
+  services.opensmtpd = let
+    credsDir = "/run/credentials/opensmtpd.service";
+  in {
+    enable = true;
+    setSendmail = false;
+    serverConfiguration = ''
+      pki mail.example.com cert "${credsDir}/cert.pem"
+      pki mail.example.com key "${credsDir}/key.pem"
+      listen on localhost tls pki mail.example.com
+      action act1 relay host smtp://127.0.0.1:10027
+      match for local action act1
+    '';
+  };
+}
 ```
 
 ## Regenerating certificates {#module-security-acme-regenerate}
diff --git a/nixpkgs/nixos/modules/services/audio/castopod.md b/nixpkgs/nixos/modules/services/audio/castopod.md
index ee8590737a7c..40838cc77aa6 100644
--- a/nixpkgs/nixos/modules/services/audio/castopod.md
+++ b/nixpkgs/nixos/modules/services/audio/castopod.md
@@ -7,16 +7,18 @@ Castopod is an open-source hosting platform made for podcasters who want to enga
 Use the following configuration to start a public instance of Castopod on `castopod.example.com` domain:
 
 ```nix
-networking.firewall.allowedTCPPorts = [ 80 443 ];
-services.castopod = {
-  enable = true;
-  database.createLocally = true;
-  nginx.virtualHost = {
-    serverName = "castopod.example.com";
-    enableACME = true;
-    forceSSL = true;
+{
+  networking.firewall.allowedTCPPorts = [ 80 443 ];
+  services.castopod = {
+    enable = true;
+    database.createLocally = true;
+    nginx.virtualHost = {
+      serverName = "castopod.example.com";
+      enableACME = true;
+      forceSSL = true;
+    };
   };
-};
+}
 ```
 
 Go to `https://castopod.example.com/cp-install` to create superadmin account after applying the above configuration.
diff --git a/nixpkgs/nixos/modules/services/audio/roon-server.nix b/nixpkgs/nixos/modules/services/audio/roon-server.nix
index 8691c08b0d36..8a6cf6ec6a41 100644
--- a/nixpkgs/nixos/modules/services/audio/roon-server.nix
+++ b/nixpkgs/nixos/modules/services/audio/roon-server.nix
@@ -9,6 +9,7 @@ in {
   options = {
     services.roon-server = {
       enable = mkEnableOption (lib.mdDoc "Roon Server");
+      package = lib.mkPackageOption pkgs "roon-server" { };
       openFirewall = mkOption {
         type = types.bool;
         default = false;
@@ -43,7 +44,7 @@ in {
       environment.ROON_ID_DIR = "/var/lib/${name}";
 
       serviceConfig = {
-        ExecStart = "${pkgs.roon-server}/bin/RoonServer";
+        ExecStart = "${lib.getExe cfg.package}";
         LimitNOFILE = 8192;
         User = cfg.user;
         Group = cfg.group;
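The new `package` option makes the Roon binary overridable; a sketch under the assumption that you simply want to set it explicitly (the default remains `pkgs.roon-server`):

```nix
{
  services.roon-server.enable = true;
  # ExecStart now uses lib.getExe on this package instead of a
  # hard-coded pkgs.roon-server path.
  services.roon-server.package = pkgs.roon-server;
}
```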
diff --git a/nixpkgs/nixos/modules/services/backup/borgbackup.md b/nixpkgs/nixos/modules/services/backup/borgbackup.md
index 39141f6ec858..2c91174732e1 100644
--- a/nixpkgs/nixos/modules/services/backup/borgbackup.md
+++ b/nixpkgs/nixos/modules/services/backup/borgbackup.md
@@ -21,22 +21,21 @@ A complete list of options for the Borgbase module may be found
 ## Basic usage for a local backup {#opt-services-backup-borgbackup-local-directory}
 
 A very basic configuration for backing up to a locally accessible directory is:
-```
+```nix
 {
     opt.services.borgbackup.jobs = {
-      { rootBackup = {
-          paths = "/";
-          exclude = [ "/nix" "/path/to/local/repo" ];
-          repo = "/path/to/local/repo";
-          doInit = true;
-          encryption = {
-            mode = "repokey";
-            passphrase = "secret";
-          };
-          compression = "auto,lzma";
-          startAt = "weekly";
+      rootBackup = {
+        paths = "/";
+        exclude = [ "/nix" "/path/to/local/repo" ];
+        repo = "/path/to/local/repo";
+        doInit = true;
+        encryption = {
+          mode = "repokey";
+          passphrase = "secret";
         };
-      }
+        compression = "auto,lzma";
+        startAt = "weekly";
+      };
     };
 }
 ```
@@ -59,7 +58,7 @@ ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/
 ```
 
 Add the following snippet to your NixOS configuration:
-```
+```nix
 {
   services.borgbackup.repos = {
     my_borg_repo = {
 that you have stored a secret passphrase in the file
 {file}`/run/keys/borgbackup_passphrase`, which should be only
 accessible by root
 
-```
+```nix
 {
   services.borgbackup.jobs = {
     backupToLocalServer = {
@@ -96,7 +95,7 @@ accessible by root
       startAt = "hourly";
     };
   };
-};
+}
 ```
 
 The following few commands (run as root) let you test your backup.
diff --git a/nixpkgs/nixos/modules/services/databases/foundationdb.md b/nixpkgs/nixos/modules/services/databases/foundationdb.md
index 0815c139152f..9f7addc9c140 100644
--- a/nixpkgs/nixos/modules/services/databases/foundationdb.md
+++ b/nixpkgs/nixos/modules/services/databases/foundationdb.md
@@ -15,9 +15,11 @@ key-value store.
 
 To enable FoundationDB, add the following to your
 {file}`configuration.nix`:
-```
-services.foundationdb.enable = true;
-services.foundationdb.package = pkgs.foundationdb71; # FoundationDB 7.1.x
+```nix
+{
+  services.foundationdb.enable = true;
+  services.foundationdb.package = pkgs.foundationdb71; # FoundationDB 7.1.x
+}
 ```
 
 The {option}`services.foundationdb.package` option is required, and
@@ -109,8 +111,10 @@ default configuration. See below for more on scaling to increase this.
 FoundationDB stores all data for all server processes under
 {file}`/var/lib/foundationdb`. You can override this using
 {option}`services.foundationdb.dataDir`, e.g.
-```
-services.foundationdb.dataDir = "/data/fdb";
+```nix
+{
+  services.foundationdb.dataDir = "/data/fdb";
+}
 ```
 
 Similarly, logs are stored under {file}`/var/log/foundationdb`
@@ -265,8 +269,10 @@ directories.
 For example, to create backups in {command}`/opt/fdb-backups`, first
 set up the paths in the module options:
 
-```
-services.foundationdb.extraReadWritePaths = [ "/opt/fdb-backups" ];
+```nix
+{
+  services.foundationdb.extraReadWritePaths = [ "/opt/fdb-backups" ];
+}
 ```
 
 Restart the FoundationDB service, and it will now be able to write to this
diff --git a/nixpkgs/nixos/modules/services/databases/postgresql.md b/nixpkgs/nixos/modules/services/databases/postgresql.md
index 3ff1f00fa9cf..6cce8f542a53 100644
--- a/nixpkgs/nixos/modules/services/databases/postgresql.md
+++ b/nixpkgs/nixos/modules/services/databases/postgresql.md
@@ -15,9 +15,11 @@ PostgreSQL is an advanced, free relational database.
 ## Configuring {#module-services-postgres-configuring}
 
 To enable PostgreSQL, add the following to your {file}`configuration.nix`:
-```
-services.postgresql.enable = true;
-services.postgresql.package = pkgs.postgresql_15;
+```nix
+{
+  services.postgresql.enable = true;
+  services.postgresql.package = pkgs.postgresql_15;
+}
 ```
 Note that you are required to specify the desired version of PostgreSQL (e.g. `pkgs.postgresql_15`). Since upgrading your PostgreSQL version requires a database dump and reload (see below), NixOS cannot provide a default value for [](#opt-services.postgresql.package) such as the most recent release of PostgreSQL.
 
@@ -35,8 +37,10 @@ alice=>
 -->
 
 By default, PostgreSQL stores its databases in {file}`/var/lib/postgresql/$psqlSchema`. You can override this using [](#opt-services.postgresql.dataDir), e.g.
-```
-services.postgresql.dataDir = "/data/postgresql";
+```nix
+{
+  services.postgresql.dataDir = "/data/postgresql";
+}
 ```
 
 ## Initializing {#module-services-postgres-initializing}
@@ -95,16 +99,19 @@ databases from `ensureDatabases` and `extraUser1` from `ensureUsers`
 are already created.
 
 ```nix
+  {
     systemd.services.postgresql.postStart = lib.mkAfter ''
       $PSQL service1 -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
       $PSQL service1 -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
       # ....
     '';
+  }
 ```
 
 ##### in intermediate oneshot service {#module-services-postgres-initializing-extra-permissions-superuser-oneshot}
 
 ```nix
+  {
     systemd.services."migrate-service1-db1" = {
       serviceConfig.Type = "oneshot";
       requiredBy = "service1.service";
@@ -119,6 +126,7 @@ are already created.
         # ....
       '';
     };
+  }
 ```
 
 #### as service user {#module-services-postgres-initializing-extra-permissions-service-user}
@@ -130,6 +138,7 @@ are already created.
 ##### in service `preStart` {#module-services-postgres-initializing-extra-permissions-service-user-pre-start}
 
 ```nix
+  {
     environment.PSQL = "psql --port=${toString services.postgresql.port}";
     path = [ postgresql ];
     systemd.services."service1".preStart = ''
@@ -137,11 +146,13 @@ are already created.
       $PSQL -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
       # ....
     '';
+  }
 ```
 
 ##### in intermediate oneshot service {#module-services-postgres-initializing-extra-permissions-service-user-oneshot}
 
 ```nix
+  {
     systemd.services."migrate-service1-db1" = {
       serviceConfig.Type = "oneshot";
       requiredBy = "service1.service";
@@ -156,6 +167,7 @@ are already created.
         # ....
       '';
     };
+  }
 ```
 
 ## Upgrading {#module-services-postgres-upgrading}
@@ -174,7 +186,7 @@ $ nix-instantiate --eval -A postgresql_13.psqlSchema
 "13"
 ```
 For an upgrade, a script like this can be used to simplify the process:
-```
+```nix
 { config, pkgs, ... }:
 {
   environment.systemPackages = [
@@ -256,16 +268,18 @@ postgresql_15.pkgs.pg_partman        postgresql_15.pkgs.pgroonga
 ```
 
 To add plugins via NixOS configuration, set `services.postgresql.extraPlugins`:
-```
-services.postgresql.package = pkgs.postgresql_12;
-services.postgresql.extraPlugins = ps: with ps; [
-  pg_repack
-  postgis
-];
+```nix
+{
+  services.postgresql.package = pkgs.postgresql_12;
+  services.postgresql.extraPlugins = ps: with ps; [
+    pg_repack
+    postgis
+  ];
+}
 ```
 
 You can build custom PostgreSQL-with-plugins (to be used outside of NixOS) using function `.withPackages`. For example, creating a custom PostgreSQL package in an overlay can look like:
-```
+```nix
 self: super: {
   postgresql_custom = self.postgresql_12.withPackages (ps: [
     ps.pg_repack
@@ -275,7 +289,7 @@ self: super: {
 ```
 
 Here's a recipe on how to override a particular plugin through an overlay:
-```
+```nix
 self: super: {
   postgresql_15 = super.postgresql_15// {
     pkgs = super.postgresql_15.pkgs // {
diff --git a/nixpkgs/nixos/modules/services/databases/tigerbeetle.md b/nixpkgs/nixos/modules/services/databases/tigerbeetle.md
index 47394d443059..12d920e7bcc7 100644
--- a/nixpkgs/nixos/modules/services/databases/tigerbeetle.md
+++ b/nixpkgs/nixos/modules/services/databases/tigerbeetle.md
@@ -7,8 +7,10 @@
 TigerBeetle is a distributed financial accounting database designed for mission critical safety and performance.
 
 To enable TigerBeetle, add the following to your {file}`configuration.nix`:
-```
+```nix
+{
   services.tigerbeetle.enable = true;
+}
 ```
 
 When first started, the TigerBeetle service will create its data file at {file}`/var/lib/tigerbeetle` unless the file already exists, in which case it will just use the existing file.
@@ -20,13 +22,15 @@ By default, TigerBeetle will only listen on a local interface.
 To configure it to listen on a different interface (and to configure it to connect to other replicas, if you're creating more than one), you'll have to set the `addresses` option.
 Note that the TigerBeetle module won't open any firewall ports automatically, so if you configure it to listen on an external interface, you'll need to ensure that connections can reach it:
 
-```
+```nix
+{
   services.tigerbeetle = {
     enable = true;
     addresses = [ "0.0.0.0:3001" ];
   };
 
   networking.firewall.allowedTCPPorts = [ 3001 ];
+}
 ```
 
 A complete list of options for TigerBeetle can be found [here](#opt-services.tigerbeetle.enable).
diff --git a/nixpkgs/nixos/modules/services/desktops/flatpak.md b/nixpkgs/nixos/modules/services/desktops/flatpak.md
index af71d85b5a15..5299b32a03c7 100644
--- a/nixpkgs/nixos/modules/services/desktops/flatpak.md
+++ b/nixpkgs/nixos/modules/services/desktops/flatpak.md
@@ -8,17 +8,21 @@ Flatpak is a system for building, distributing, and running sandboxed desktop
 applications on Linux.
 
 To enable Flatpak, add the following to your {file}`configuration.nix`:
-```
+```nix
+{
   services.flatpak.enable = true;
+}
 ```
 
 For the sandboxed apps to work correctly, desktop integration portals need to
 be installed. If you run GNOME, this will be handled automatically for you;
 in other cases, you will need to add something like the following to your
 {file}`configuration.nix`:
-```
+```nix
+{
   xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
   xdg.portal.config.common.default = "gtk";
+}
 ```
 
 Then, you will need to add a repository, for example,
diff --git a/nixpkgs/nixos/modules/services/desktops/flatpak.nix b/nixpkgs/nixos/modules/services/desktops/flatpak.nix
index 4c26e6874023..62ef38a3d554 100644
--- a/nixpkgs/nixos/modules/services/desktops/flatpak.nix
+++ b/nixpkgs/nixos/modules/services/desktops/flatpak.nix
@@ -32,6 +32,8 @@ in {
 
     security.polkit.enable = true;
 
+    fonts.fontDir.enable = true;
+
     services.dbus.packages = [ pkgs.flatpak ];
 
     systemd.packages = [ pkgs.flatpak ];
diff --git a/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix b/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix
index 182615cd4d6c..5c6eba889ebe 100644
--- a/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix
+++ b/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix
@@ -1,14 +1,21 @@
 # PipeWire service.
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
+  inherit (builtins) attrNames concatMap length;
+  inherit (lib) maintainers teams;
+  inherit (lib.attrsets) attrByPath attrsToList concatMapAttrs filterAttrs;
+  inherit (lib.lists) flatten optional optionals;
+  inherit (lib.modules) mkIf mkRemovedOptionModule;
+  inherit (lib.options) literalExpression mkEnableOption mkOption mkPackageOption;
+  inherit (lib.strings) concatMapStringsSep hasPrefix optionalString;
+  inherit (lib.types) attrsOf bool listOf package;
+
   json = pkgs.formats.json {};
   mapToFiles = location: config: concatMapAttrs (name: value: { "share/pipewire/${location}.conf.d/${name}.conf" = json.generate "${name}" value; }) config;
   extraConfigPkgFromFiles = locations: filesSet: pkgs.runCommand "pipewire-extra-config" { } ''
-    mkdir -p ${lib.concatMapStringsSep " " (l: "$out/share/pipewire/${l}.conf.d") locations}
-    ${lib.concatMapStringsSep ";" ({name, value}: "ln -s ${value} $out/${name}") (lib.attrsToList filesSet)}
+    mkdir -p ${concatMapStringsSep " " (l: "$out/share/pipewire/${l}.conf.d") locations}
+    ${concatMapStringsSep ";" ({name, value}: "ln -s ${value} $out/${name}") (attrsToList filesSet)}
   '';
   cfg = config.services.pipewire;
   enable32BitAlsaPlugins = cfg.alsa.support32Bit
@@ -40,15 +47,15 @@ let
     name = "pipewire-configs";
     paths = configPackages
       ++ [ extraConfigPkg ]
-      ++ lib.optionals cfg.wireplumber.enable cfg.wireplumber.configPackages;
+      ++ optionals cfg.wireplumber.enable cfg.wireplumber.configPackages;
     pathsToLink = [ "/share/pipewire" ];
   };
 
-  requiredLv2Packages = lib.flatten
+  requiredLv2Packages = flatten
     (
-      lib.concatMap
+      concatMap
       (p:
-        lib.attrByPath ["passthru" "requiredLv2Packages"] [] p
+        attrByPath ["passthru" "requiredLv2Packages"] [] p
       )
       configPackages
     );
@@ -59,58 +66,58 @@ let
     pathsToLink = [ "/lib/lv2" ];
   };
 in {
-  meta.maintainers = teams.freedesktop.members ++ [ lib.maintainers.k900 ];
+  meta.maintainers = teams.freedesktop.members ++ [ maintainers.k900 ];
 
   ###### interface
   options = {
     services.pipewire = {
-      enable = mkEnableOption (lib.mdDoc "PipeWire service");
+      enable = mkEnableOption "PipeWire service";
 
       package = mkPackageOption pkgs "pipewire" { };
 
       socketActivation = mkOption {
         default = true;
-        type = types.bool;
-        description = lib.mdDoc ''
+        type = bool;
+        description = ''
           Automatically run PipeWire when connections are made to the PipeWire socket.
         '';
       };
 
       audio = {
-        enable = lib.mkOption {
-          type = lib.types.bool;
+        enable = mkOption {
+          type = bool;
           # this is for backwards compatibility
           default = cfg.alsa.enable || cfg.jack.enable || cfg.pulse.enable;
-          defaultText = lib.literalExpression "config.services.pipewire.alsa.enable || config.services.pipewire.jack.enable || config.services.pipewire.pulse.enable";
-          description = lib.mdDoc "Whether to use PipeWire as the primary sound server";
+          defaultText = literalExpression "config.services.pipewire.alsa.enable || config.services.pipewire.jack.enable || config.services.pipewire.pulse.enable";
+          description = "Whether to use PipeWire as the primary sound server";
         };
       };
 
       alsa = {
-        enable = mkEnableOption (lib.mdDoc "ALSA support");
-        support32Bit = mkEnableOption (lib.mdDoc "32-bit ALSA support on 64-bit systems");
+        enable = mkEnableOption "ALSA support";
+        support32Bit = mkEnableOption "32-bit ALSA support on 64-bit systems";
       };
 
       jack = {
-        enable = mkEnableOption (lib.mdDoc "JACK audio emulation");
+        enable = mkEnableOption "JACK audio emulation";
       };
 
       raopOpenFirewall = mkOption {
-        type = lib.types.bool;
+        type = bool;
         default = false;
-        description = lib.mdDoc ''
+        description = ''
           Opens UDP/6001-6002, required by RAOP/Airplay for timing and control data.
         '';
       };
 
       pulse = {
-        enable = mkEnableOption (lib.mdDoc "PulseAudio server emulation");
+        enable = mkEnableOption "PulseAudio server emulation";
       };
 
-      systemWide = lib.mkOption {
-        type = lib.types.bool;
+      systemWide = mkOption {
+        type = bool;
         default = false;
-        description = lib.mdDoc ''
+        description = ''
           If true, a system-wide PipeWire service and socket is enabled
           allowing all users in the "pipewire" group to use it simultaneously.
           If false, then user units are used instead, restricting access to
@@ -124,7 +131,7 @@ in {
 
       extraConfig = {
         pipewire = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-clock-rate" = {
@@ -138,7 +145,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire server.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire.conf.d`.
@@ -157,7 +164,7 @@ in {
           '';
         };
         client = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-no-resample" = {
@@ -166,7 +173,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire client library, used by most applications.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client.conf.d`.
@@ -177,7 +184,7 @@ in {
           '';
         };
         client-rt = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-alsa-linear-volume" = {
@@ -186,7 +193,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire client library, used by real-time applications and legacy ALSA clients.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client-rt.conf.d`.
@@ -198,7 +205,7 @@ in {
           '';
         };
         jack = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "20-hide-midi" = {
@@ -207,7 +214,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire JACK server and client library.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/jack.conf.d`.
@@ -218,7 +225,7 @@ in {
           '';
         };
         pipewire-pulse = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "15-force-s16-info" = {
@@ -232,7 +239,7 @@ in {
               }];
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire PulseAudio server.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire-pulse.conf.d`.
@@ -248,10 +255,32 @@ in {
         };
       };
 
-      configPackages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      configPackages = mkOption {
+        type = listOf package;
         default = [];
-        description = lib.mdDoc ''
+        example = literalExpression ''[
+          (pkgs.writeTextDir "share/pipewire/pipewire.conf.d/10-loopback.conf" '''
+            context.modules = [
+            {   name = libpipewire-module-loopback
+                args = {
+                  node.description = "Scarlett Focusrite Line 1"
+                  capture.props = {
+                      audio.position = [ FL ]
+                      stream.dont-remix = true
+                      node.target = "alsa_input.usb-Focusrite_Scarlett_Solo_USB_Y7ZD17C24495BC-00.analog-stereo"
+                      node.passive = true
+                  }
+                  playback.props = {
+                      node.name = "SF_mono_in_1"
+                      media.class = "Audio/Source"
+                      audio.position = [ MONO ]
+                  }
+                }
+            }
+            ]
+          ''')
+        ]'';
+        description = ''
           List of packages that provide PipeWire configuration, in the form of
           `share/pipewire/*/*.conf` files.
 
@@ -260,11 +289,11 @@ in {
         '';
       };
 
-      extraLv2Packages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      extraLv2Packages = mkOption {
+        type = listOf package;
         default = [];
-        example = lib.literalExpression "[ pkgs.lsp-plugins ]";
-        description = lib.mdDoc ''
+        example = literalExpression "[ pkgs.lsp-plugins ]";
+        description = ''
           List of packages that provide LV2 plugins in `lib/lv2` that should
           be made available to PipeWire for [filter chains][wiki-filter-chain].
 
@@ -279,11 +308,11 @@ in {
   };
 
   imports = [
-    (lib.mkRemovedOptionModule ["services" "pipewire" "config"] ''
+    (mkRemovedOptionModule ["services" "pipewire" "config"] ''
       Overriding default PipeWire configuration through NixOS options never worked correctly and is no longer supported.
       Please create drop-in configuration files via `services.pipewire.extraConfig` instead.
     '')
-    (lib.mkRemovedOptionModule ["services" "pipewire" "media-session"] ''
+    (mkRemovedOptionModule ["services" "pipewire" "media-session"] ''
       pipewire-media-session is no longer supported upstream and has been removed.
       Please switch to `services.pipewire.wireplumber` instead.
     '')
@@ -306,12 +335,12 @@ in {
         message = "Using PipeWire's ALSA/PulseAudio compatibility layers requires running PipeWire as the sound server. Set `services.pipewire.audio.enable` to true.";
       }
       {
-        assertion = builtins.length
-          (builtins.attrNames
+        assertion = length
+          (attrNames
             (
-              lib.filterAttrs
+              filterAttrs
                 (name: value:
-                  lib.hasPrefix "pipewire/" name || name == "pipewire"
+                  hasPrefix "pipewire/" name || name == "pipewire"
                 )
                 config.environment.etc
             )) == 1;
@@ -320,7 +349,7 @@ in {
     ];
 
     environment.systemPackages = [ cfg.package ]
-                                 ++ lib.optional cfg.jack.enable jack-libs;
+                                 ++ optional cfg.jack.enable jack-libs;
 
     systemd.packages = [ cfg.package ];
 
@@ -336,16 +365,16 @@ in {
     systemd.user.sockets.pipewire.enable = !cfg.systemWide;
     systemd.user.services.pipewire.enable = !cfg.systemWide;
 
-    systemd.services.pipewire.environment.LV2_PATH = lib.mkIf cfg.systemWide "${lv2Plugins}/lib/lv2";
-    systemd.user.services.pipewire.environment.LV2_PATH = lib.mkIf (!cfg.systemWide) "${lv2Plugins}/lib/lv2";
+    systemd.services.pipewire.environment.LV2_PATH = mkIf cfg.systemWide "${lv2Plugins}/lib/lv2";
+    systemd.user.services.pipewire.environment.LV2_PATH = mkIf (!cfg.systemWide) "${lv2Plugins}/lib/lv2";
 
     # Mask pw-pulse if it's not wanted
     systemd.user.services.pipewire-pulse.enable = cfg.pulse.enable;
     systemd.user.sockets.pipewire-pulse.enable = cfg.pulse.enable;
 
-    systemd.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
-    systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
-    systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.sockets.pipewire.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.user.sockets.pipewire.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.user.sockets.pipewire-pulse.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
 
     services.udev.packages = [ cfg.package ];
 
@@ -377,18 +406,18 @@ in {
     };
 
     environment.sessionVariables.LD_LIBRARY_PATH =
-      lib.mkIf cfg.jack.enable [ "${cfg.package.jack}/lib" ];
+      mkIf cfg.jack.enable [ "${cfg.package.jack}/lib" ];
 
-    networking.firewall.allowedUDPPorts = lib.mkIf cfg.raopOpenFirewall [ 6001 6002 ];
+    networking.firewall.allowedUDPPorts = mkIf cfg.raopOpenFirewall [ 6001 6002 ];
 
-    users = lib.mkIf cfg.systemWide {
+    users = mkIf cfg.systemWide {
       users.pipewire = {
         uid = config.ids.uids.pipewire;
         group = "pipewire";
         extraGroups = [
           "audio"
           "video"
-        ] ++ lib.optional config.security.rtkit.enable "rtkit";
+        ] ++ optional config.security.rtkit.enable "rtkit";
         description = "PipeWire system service user";
         isSystemUser = true;
         home = "/var/lib/pipewire";
diff --git a/nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix b/nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix
index de177d0e4ef3..6ab62eb03c25 100644
--- a/nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix
+++ b/nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix
@@ -1,46 +1,65 @@
 { config, lib, pkgs, ... }:
 
 let
+  inherit (builtins) attrNames concatMap length;
+  inherit (lib) maintainers;
+  inherit (lib.attrsets) attrByPath filterAttrs;
+  inherit (lib.lists) flatten optional;
+  inherit (lib.modules) mkIf;
+  inherit (lib.options) literalExpression mkOption;
+  inherit (lib.strings) hasPrefix;
+  inherit (lib.types) bool listOf package;
+
   pwCfg = config.services.pipewire;
   cfg = pwCfg.wireplumber;
   pwUsedForAudio = pwCfg.audio.enable;
 in
 {
-  meta.maintainers = [ lib.maintainers.k900 ];
+  meta.maintainers = [ maintainers.k900 ];
 
   options = {
     services.pipewire.wireplumber = {
-      enable = lib.mkOption {
-        type = lib.types.bool;
-        default = config.services.pipewire.enable;
-        defaultText = lib.literalExpression "config.services.pipewire.enable";
-        description = lib.mdDoc "Whether to enable WirePlumber, a modular session / policy manager for PipeWire";
+      enable = mkOption {
+        type = bool;
+        default = pwCfg.enable;
+        defaultText = literalExpression "config.services.pipewire.enable";
+        description = "Whether to enable WirePlumber, a modular session / policy manager for PipeWire";
       };
 
-      package = lib.mkOption {
-        type = lib.types.package;
+      package = mkOption {
+        type = package;
         default = pkgs.wireplumber;
-        defaultText = lib.literalExpression "pkgs.wireplumber";
-        description = lib.mdDoc "The WirePlumber derivation to use.";
+        defaultText = literalExpression "pkgs.wireplumber";
+        description = "The WirePlumber derivation to use.";
       };
 
-      configPackages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      configPackages = mkOption {
+        type = listOf package;
         default = [ ];
-        description = lib.mdDoc ''
+        example = literalExpression ''[
+          (pkgs.writeTextDir "share/wireplumber/wireplumber.conf.d/10-bluez.conf" '''
+            monitor.bluez.properties = {
+              bluez5.roles = [ a2dp_sink a2dp_source bap_sink bap_source hsp_hs hsp_ag hfp_hf hfp_ag ]
+              bluez5.codecs = [ sbc sbc_xq aac ]
+              bluez5.enable-sbc-xq = true
+              bluez5.hfphsp-backend = "native"
+            }
+          ''')
+        ]'';
+        description = ''
           List of packages that provide WirePlumber configuration, in the form of
-          `share/wireplumber/*/*.lua` files.
+          `share/wireplumber/*/*.conf` files.
 
           LV2 dependencies will be picked up from config packages automatically
           via `passthru.requiredLv2Packages`.
         '';
       };
 
-      extraLv2Packages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      extraLv2Packages = mkOption {
+        type = listOf package;
         default = [];
-        example = lib.literalExpression "[ pkgs.lsp-plugins ]";
-        description = lib.mdDoc ''
+        example = literalExpression "[ pkgs.lsp-plugins ]";
+        description = ''
           List of packages that provide LV2 plugins in `lib/lv2` that should
           be made available to WirePlumber for [filter chains][wiki-filter-chain].
 
@@ -78,8 +97,8 @@ in
       '';
 
       configPackages = cfg.configPackages
-          ++ lib.optional (!pwUsedForAudio) pwNotForAudioConfigPkg
-          ++ lib.optional config.services.pipewire.systemWide systemwideConfigPkg;
+          ++ optional (!pwUsedForAudio) pwNotForAudioConfigPkg
+          ++ optional pwCfg.systemWide systemwideConfigPkg;
 
       configs = pkgs.buildEnv {
         name = "wireplumber-configs";
@@ -87,11 +106,11 @@ in
         pathsToLink = [ "/share/wireplumber" ];
       };
 
-      requiredLv2Packages = lib.flatten
+      requiredLv2Packages = flatten
         (
-          lib.concatMap
+          concatMap
             (p:
-              lib.attrByPath ["passthru" "requiredLv2Packages"] [] p
+              attrByPath ["passthru" "requiredLv2Packages"] [] p
             )
             configPackages
         );
@@ -102,19 +121,19 @@ in
         pathsToLink = [ "/lib/lv2" ];
       };
     in
-    lib.mkIf cfg.enable {
+    mkIf cfg.enable {
       assertions = [
         {
           assertion = !config.hardware.bluetooth.hsphfpd.enable;
           message = "Using WirePlumber conflicts with hsphfpd, as it provides the same functionality. `hardware.bluetooth.hsphfpd.enable` needs be set to false";
         }
         {
-          assertion = builtins.length
-            (builtins.attrNames
+          assertion = length
+            (attrNames
               (
-                lib.filterAttrs
+                filterAttrs
                   (name: value:
-                    lib.hasPrefix "wireplumber/" name || name == "wireplumber"
+                    hasPrefix "wireplumber/" name || name == "wireplumber"
                   )
                   config.environment.etc
               )) == 1;
@@ -128,19 +147,19 @@ in
 
       systemd.packages = [ cfg.package ];
 
-      systemd.services.wireplumber.enable = config.services.pipewire.systemWide;
-      systemd.user.services.wireplumber.enable = !config.services.pipewire.systemWide;
+      systemd.services.wireplumber.enable = pwCfg.systemWide;
+      systemd.user.services.wireplumber.enable = !pwCfg.systemWide;
 
       systemd.services.wireplumber.wantedBy = [ "pipewire.service" ];
       systemd.user.services.wireplumber.wantedBy = [ "pipewire.service" ];
 
-      systemd.services.wireplumber.environment = lib.mkIf config.services.pipewire.systemWide {
+      systemd.services.wireplumber.environment = mkIf pwCfg.systemWide {
         # Force WirePlumber to use system dbus.
         DBUS_SESSION_BUS_ADDRESS = "unix:path=/run/dbus/system_bus_socket";
         LV2_PATH = "${lv2Plugins}/lib/lv2";
       };
 
       systemd.user.services.wireplumber.environment.LV2_PATH =
-        lib.mkIf (!config.services.pipewire.systemWide) "${lv2Plugins}/lib/lv2";
+        mkIf (!pwCfg.systemWide) "${lv2Plugins}/lib/lv2";
     };
 }
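Likewise, a brief sketch (not part of the diff) of how the WirePlumber options above compose with the PipeWire module; `pkgs.lsp-plugins` is the example value the module itself uses for `extraLv2Packages`:

```nix
{
  services.pipewire = {
    enable = true;
    wireplumber = {
      enable = true; # defaults to config.services.pipewire.enable
      extraLv2Packages = [ pkgs.lsp-plugins ];
    };
  };
}
```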
diff --git a/nixpkgs/nixos/modules/services/development/athens.md b/nixpkgs/nixos/modules/services/development/athens.md
index 77663db509d5..2795930b0a02 100644
--- a/nixpkgs/nixos/modules/services/development/athens.md
+++ b/nixpkgs/nixos/modules/services/development/athens.md
@@ -18,7 +18,7 @@ A complete list of options for the Athens module may be found
 ## Basic usage for a caching proxy configuration {#opt-services-development-athens-caching-proxy}
 
 A very basic configuration for Athens that acts as a caching and forwarding HTTP proxy is:
-```
+```nix
 {
     services.athens = {
       enable = true;
@@ -28,7 +28,7 @@ A very basic configuration for Athens that acts as a caching and forwarding HTTP
 
 If you want to prevent Athens from writing to disk, you can instead configure it to cache modules only in memory:
 
-```
+```nix
 {
     services.athens = {
       enable = true;
@@ -39,10 +39,10 @@ If you want to prevent Athens from writing to disk, you can instead configure it
 
 To use the local proxy in Go builds, you can set the proxy as environment variable:
 
-```
+```nix
 {
   environment.variables = {
-    GOPROXY = "http://localhost:3000"
+    GOPROXY = "http://localhost:3000";
   };
 }
 ```
diff --git a/nixpkgs/nixos/modules/services/development/blackfire.md b/nixpkgs/nixos/modules/services/development/blackfire.md
index e2e7e4780c79..5a7fbe68f7d2 100644
--- a/nixpkgs/nixos/modules/services/development/blackfire.md
+++ b/nixpkgs/nixos/modules/services/development/blackfire.md
@@ -7,7 +7,7 @@
 [Blackfire](https://blackfire.io) is a proprietary tool for profiling applications. There are several languages supported by the product but currently only PHP support is packaged in Nixpkgs. The back-end consists of a module that is loaded into the language runtime (called *probe*) and a service (*agent*) that the probe connects to and that sends the profiles to the server.
 
 To use it, you will need to enable the agent and the probe on your server. The exact method will depend on the way you use PHP but here is an example of NixOS configuration for PHP-FPM:
-```
+```nix
 let
   php = pkgs.php.withExtensions ({ enabled, all }: enabled ++ (with all; [
     blackfire
diff --git a/nixpkgs/nixos/modules/services/development/livebook.md b/nixpkgs/nixos/modules/services/development/livebook.md
index 5315f2c2755a..aac9c58d081c 100644
--- a/nixpkgs/nixos/modules/services/development/livebook.md
+++ b/nixpkgs/nixos/modules/services/development/livebook.md
@@ -9,7 +9,7 @@ Enabling the `livebook` service creates a user
 [`systemd`](https://www.freedesktop.org/wiki/Software/systemd/) unit
 which runs the server.
 
-```
+```nix
 { ... }:
 
 {
@@ -51,6 +51,8 @@ some features require additional packages.  For example, the machine
 learning Kinos require `gcc` and `gnumake`.  To add these, use
 `extraPackages`:
 
-```
-services.livebook.extraPackages = with pkgs; [ gcc gnumake ];
+```nix
+{
+  services.livebook.extraPackages = with pkgs; [ gcc gnumake ];
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/editors/emacs.md b/nixpkgs/nixos/modules/services/editors/emacs.md
index 02f47b098d86..885f927422bd 100644
--- a/nixpkgs/nixos/modules/services/editors/emacs.md
+++ b/nixpkgs/nixos/modules/services/editors/emacs.md
@@ -178,7 +178,7 @@ file {file}`configuration.nix` to make it contain:
 ::: {.example #module-services-emacs-configuration-nix}
 ### Custom Emacs in `configuration.nix`
 
-```
+```nix
 {
  environment.systemPackages = [
    # [...]
@@ -203,7 +203,7 @@ adding it to your {file}`~/.config/nixpkgs/config.nix` (see
 ::: {.example #module-services-emacs-config-nix}
 ### Custom Emacs in `~/.config/nixpkgs/config.nix`
 
-```
+```nix
 {
   packageOverrides = super: let self = super.pkgs; in {
     myemacs = import ./emacs.nix { pkgs = self; };
@@ -228,7 +228,7 @@ only use {command}`emacsclient`), you can change your file
 ::: {.example #ex-emacsGtk3Nix}
 ### Custom Emacs build
 
-```
+```nix
 { pkgs ? import <nixpkgs> {} }:
 let
   myEmacs = (pkgs.emacs.override {
@@ -242,7 +242,7 @@ let
       rm $out/share/applications/emacs.desktop
     '';
   });
-in [...]
+in [ /* ... */ ]
 ```
 :::
 
@@ -262,8 +262,10 @@ with the user's login session.
 
 To install and enable the {command}`systemd` user service for Emacs
 daemon, add the following to your {file}`configuration.nix`:
-```
-services.emacs.enable = true;
+```nix
+{
+  services.emacs.enable = true;
+}
 ```
 
 The {var}`services.emacs.package` option allows a custom
@@ -323,9 +325,11 @@ In general, {command}`systemd` user services are globally enabled
 by symlinks in {file}`/etc/systemd/user`. In the case where
 Emacs daemon is not wanted for all users, it is possible to install the
 service but not globally enable it:
-```
-services.emacs.enable = false;
-services.emacs.install = true;
+```nix
+{
+  services.emacs.enable = false;
+  services.emacs.install = true;
+}
 ```
 
 To enable the {command}`systemd` user service for just the
diff --git a/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix
index d29e0f542f55..a4f93221475d 100644
--- a/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix
+++ b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix
@@ -94,7 +94,7 @@ in
       { source = "${etcFiles}/etc/opt/brother/scanner/brscan5"; };
     environment.etc."opt/brother/scanner/models" =
       { source = "${etcFiles}/etc/opt/brother/scanner/brscan5/models"; };
-    environment.etc."sane.d/dll.d/brother5.conf".source = "${pkgs.brscan5}/etc/sane.d/dll.d/brother.conf";
+    environment.etc."sane.d/dll.d/brother5.conf".source = "${pkgs.brscan5}/etc/sane.d/dll.d/brother5.conf";
 
     assertions = [
       { assertion = all (x: !(null != x.ip && null != x.nodename)) netDeviceList;
diff --git a/nixpkgs/nixos/modules/services/mail/mailman.md b/nixpkgs/nixos/modules/services/mail/mailman.md
index 55b61f8a2582..446aa1f921b6 100644
--- a/nixpkgs/nixos/modules/services/mail/mailman.md
+++ b/nixpkgs/nixos/modules/services/mail/mailman.md
@@ -9,7 +9,7 @@ an existing, securely configured Postfix setup, as it does not automatically con
 ## Basic usage with Postfix {#module-services-mailman-basic-usage}
 
 For a basic configuration with Postfix as the MTA, the following settings are suggested:
-```
+```nix
 { config, ... }: {
   services.postfix = {
     enable = true;
@@ -50,7 +50,7 @@ necessary, but outside the scope of the Mailman module.
 ## Using with other MTAs {#module-services-mailman-other-mtas}
 
 Mailman also supports other MTAs, though with a little more configuration. For example, to use Mailman with Exim, you can use the following settings:
-```
+```nix
 { config, ... }: {
   services = {
     mailman = {
diff --git a/nixpkgs/nixos/modules/services/matrix/maubot.md b/nixpkgs/nixos/modules/services/matrix/maubot.md
index f6a05db56caf..d49066057a23 100644
--- a/nixpkgs/nixos/modules/services/matrix/maubot.md
+++ b/nixpkgs/nixos/modules/services/matrix/maubot.md
@@ -10,7 +10,9 @@ framework for Matrix.
 2. If you want to use PostgreSQL instead of SQLite, do this:
 
    ```nix
-   services.maubot.settings.database = "postgresql://maubot@localhost/maubot";
+   {
+     services.maubot.settings.database = "postgresql://maubot@localhost/maubot";
+   }
    ```
 
    If the PostgreSQL connection requires a password, you will have to
@@ -18,54 +20,58 @@ framework for Matrix.
 3. If you plan to expose your Maubot interface to the web, do something
    like this:
    ```nix
-   services.nginx.virtualHosts."matrix.example.org".locations = {
-     "/_matrix/maubot/" = {
-       proxyPass = "http://127.0.0.1:${toString config.services.maubot.settings.server.port}";
-       proxyWebsockets = true;
+   {
+     services.nginx.virtualHosts."matrix.example.org".locations = {
+       "/_matrix/maubot/" = {
+         proxyPass = "http://127.0.0.1:${toString config.services.maubot.settings.server.port}";
+         proxyWebsockets = true;
+       };
      };
-   };
-   services.maubot.settings.server.public_url = "matrix.example.org";
-   # do the following only if you want to use something other than /_matrix/maubot...
-   services.maubot.settings.server.ui_base_path = "/another/base/path";
+     services.maubot.settings.server.public_url = "matrix.example.org";
+     # do the following only if you want to use something other than /_matrix/maubot...
+     services.maubot.settings.server.ui_base_path = "/another/base/path";
+   }
    ```
 4. Optionally, set `services.maubot.pythonPackages` to a list of python3
    packages to make available for Maubot plugins.
 5. Optionally, set `services.maubot.plugins` to a list of Maubot
    plugins (full list available at https://plugins.maubot.xyz/):
    ```nix
-   services.maubot.plugins = with config.services.maubot.package.plugins; [
-     reactbot
-     # This will only change the default config! After you create a
-     # plugin instance, the default config will be copied into that
-     # instance's config in Maubot's database, and further base config
-     # changes won't affect the running plugin.
-     (rss.override {
-       base_config = {
-         update_interval = 60;
-         max_backoff = 7200;
-         spam_sleep = 2;
-         command_prefix = "rss";
-         admins = [ "@chayleaf:pavluk.org" ];
-       };
-     })
-   ];
-   # ...or...
-   services.maubot.plugins = config.services.maubot.package.plugins.allOfficialPlugins;
-   # ...or...
-   services.maubot.plugins = config.services.maubot.package.plugins.allPlugins;
-   # ...or...
-   services.maubot.plugins = with config.services.maubot.package.plugins; [
-     (weather.override {
-       # you can pass base_config as a string
-       base_config = ''
-         default_location: New York
-         default_units: M
-         default_language:
-         show_link: true
-         show_image: false
-       '';
-     })
-   ];
+   {
+     services.maubot.plugins = with config.services.maubot.package.plugins; [
+       reactbot
+       # This will only change the default config! After you create a
+       # plugin instance, the default config will be copied into that
+       # instance's config in Maubot's database, and further base config
+       # changes won't affect the running plugin.
+       (rss.override {
+         base_config = {
+           update_interval = 60;
+           max_backoff = 7200;
+           spam_sleep = 2;
+           command_prefix = "rss";
+           admins = [ "@chayleaf:pavluk.org" ];
+         };
+       })
+     ];
+     # ...or...
+     services.maubot.plugins = config.services.maubot.package.plugins.allOfficialPlugins;
+     # ...or...
+     services.maubot.plugins = config.services.maubot.package.plugins.allPlugins;
+     # ...or...
+     services.maubot.plugins = with config.services.maubot.package.plugins; [
+       (weather.override {
+         # you can pass base_config as a string
+         base_config = ''
+           default_location: New York
+           default_units: M
+           default_language:
+           show_link: true
+           show_image: false
+         '';
+       })
+     ];
+   }
    ```
 6. Start Maubot at least once before doing the following steps (it's
    necessary to generate the initial config).
diff --git a/nixpkgs/nixos/modules/services/matrix/mjolnir.md b/nixpkgs/nixos/modules/services/matrix/mjolnir.md
index f6994eeb8fa5..2594f05ce27b 100644
--- a/nixpkgs/nixos/modules/services/matrix/mjolnir.md
+++ b/nixpkgs/nixos/modules/services/matrix/mjolnir.md
@@ -46,7 +46,7 @@ autoconfigure a new Pantalaimon instance, which will connect to the homeserver
 set in [services.mjolnir.homeserverUrl](#opt-services.mjolnir.homeserverUrl) and Mjolnir itself
 will be configured to connect to the new Pantalaimon instance.
 
-```
+```nix
 {
   services.mjolnir = {
     enable = true;
@@ -78,7 +78,7 @@ uses across an entire homeserver.
 To use the Antispam Module, add `matrix-synapse-plugins.matrix-synapse-mjolnir-antispam`
 to the Synapse plugin list and enable the `mjolnir.Module` module.
 
-```
+```nix
 {
   services.matrix-synapse = {
     plugins = with pkgs; [
diff --git a/nixpkgs/nixos/modules/services/matrix/synapse.md b/nixpkgs/nixos/modules/services/matrix/synapse.md
index 9c9c025fc5f5..7f6587ce09df 100644
--- a/nixpkgs/nixos/modules/services/matrix/synapse.md
+++ b/nixpkgs/nixos/modules/services/matrix/synapse.md
@@ -23,7 +23,7 @@ synapse server for the `example.org` domain, served from
 the host `myhostname.example.org`. For more information,
 please refer to the
 [installation instructions of Synapse](https://element-hq.github.io/synapse/latest/setup/installation.html) .
-```
+```nix
 { pkgs, lib, config, ... }:
 let
   fqdn = "${config.networking.hostName}.${config.networking.domain}";
@@ -158,7 +158,7 @@ in an additional file like this:
     by `matrix-synapse`.
   - Include the file like this in your configuration:
 
-    ```
+    ```nix
     {
       services.matrix-synapse.extraConfigFiles = [
         "/run/secrets/matrix-shared-secret"
@@ -190,7 +190,7 @@ fill in the required connection details automatically when you enter your
 Matrix Identifier. See
 [Try Matrix Now!](https://matrix.org/docs/projects/try-matrix-now.html)
 for a list of existing clients and their supported featureset.
-```
+```nix
 {
   services.nginx.virtualHosts."element.${fqdn}" = {
     enableACME = true;
diff --git a/nixpkgs/nixos/modules/services/misc/anki-sync-server.md b/nixpkgs/nixos/modules/services/misc/anki-sync-server.md
index 5d2b4da4d2fc..f58d3d8ad0da 100644
--- a/nixpkgs/nixos/modules/services/misc/anki-sync-server.md
+++ b/nixpkgs/nixos/modules/services/misc/anki-sync-server.md
@@ -16,7 +16,7 @@ unit which runs the sync server with an isolated user using the systemd
 `DynamicUser` option.
 
 This can be done by enabling the `anki-sync-server` service:
-```
+```nix
 { ... }:
 
 {
@@ -27,7 +27,7 @@ This can be done by enabling the `anki-sync-server` service:
 It is necessary to set at least one username-password pair under
 {option}`services.anki-sync-server.users`. For example
 
-```
+```nix
 {
   services.anki-sync-server.users = [
     {
@@ -50,7 +50,7 @@ you want to expose the sync server directly to other computers (not recommended
 in most circumstances, because the sync server doesn't use HTTPS), then set the
 following options:
 
-```
+```nix
 {
   services.anki-sync-server.host = "0.0.0.0";
   services.anki-sync-server.openFirewall = true;
diff --git a/nixpkgs/nixos/modules/services/misc/forgejo.md b/nixpkgs/nixos/modules/services/misc/forgejo.md
index 14b21933e6b0..f234ebf44aef 100644
--- a/nixpkgs/nixos/modules/services/misc/forgejo.md
+++ b/nixpkgs/nixos/modules/services/misc/forgejo.md
@@ -57,23 +57,25 @@ locations and database, instead of having to copy or rename them.
 Make sure to disable `services.gitea` when doing this.
 
 ```nix
-services.gitea.enable = false;
-
-services.forgejo = {
-  enable = true;
-  user = "gitea";
-  group = "gitea";
-  stateDir = "/var/lib/gitea";
-  database.name = "gitea";
-  database.user = "gitea";
-};
-
-users.users.gitea = {
-  home = "/var/lib/gitea";
-  useDefaultShell = true;
-  group = "gitea";
-  isSystemUser = true;
-};
-
-users.groups.gitea = {};
+{
+  services.gitea.enable = false;
+
+  services.forgejo = {
+    enable = true;
+    user = "gitea";
+    group = "gitea";
+    stateDir = "/var/lib/gitea";
+    database.name = "gitea";
+    database.user = "gitea";
+  };
+
+  users.users.gitea = {
+    home = "/var/lib/gitea";
+    useDefaultShell = true;
+    group = "gitea";
+    isSystemUser = true;
+  };
+
+  users.groups.gitea = {};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/misc/gitlab.md b/nixpkgs/nixos/modules/services/misc/gitlab.md
index 916b23584ed0..f7a5a8027489 100644
--- a/nixpkgs/nixos/modules/services/misc/gitlab.md
+++ b/nixpkgs/nixos/modules/services/misc/gitlab.md
@@ -10,19 +10,21 @@ configure a webserver to proxy HTTP requests to the socket.
 
 For instance, the following configuration could be used to use nginx as
 frontend proxy:
-```
-services.nginx = {
-  enable = true;
-  recommendedGzipSettings = true;
-  recommendedOptimisation = true;
-  recommendedProxySettings = true;
-  recommendedTlsSettings = true;
-  virtualHosts."git.example.com" = {
-    enableACME = true;
-    forceSSL = true;
-    locations."/".proxyPass = "http://unix:/run/gitlab/gitlab-workhorse.socket";
+```nix
+{
+  services.nginx = {
+    enable = true;
+    recommendedGzipSettings = true;
+    recommendedOptimisation = true;
+    recommendedProxySettings = true;
+    recommendedTlsSettings = true;
+    virtualHosts."git.example.com" = {
+      enableACME = true;
+      forceSSL = true;
+      locations."/".proxyPass = "http://unix:/run/gitlab/gitlab-workhorse.socket";
+    };
   };
-};
+}
 ```
 
 ## Configuring {#module-services-gitlab-configuring}
@@ -35,36 +37,38 @@ The default state dir is `/var/gitlab/state`. This is where
 all data like the repositories and uploads will be stored.
 
 A basic configuration with some custom settings could look like this:
-```
-services.gitlab = {
-  enable = true;
-  databasePasswordFile = "/var/keys/gitlab/db_password";
-  initialRootPasswordFile = "/var/keys/gitlab/root_password";
-  https = true;
-  host = "git.example.com";
-  port = 443;
-  user = "git";
-  group = "git";
-  smtp = {
+```nix
+{
+  services.gitlab = {
     enable = true;
-    address = "localhost";
-    port = 25;
-  };
-  secrets = {
-    dbFile = "/var/keys/gitlab/db";
-    secretFile = "/var/keys/gitlab/secret";
-    otpFile = "/var/keys/gitlab/otp";
-    jwsFile = "/var/keys/gitlab/jws";
-  };
-  extraConfig = {
-    gitlab = {
-      email_from = "gitlab-no-reply@example.com";
-      email_display_name = "Example GitLab";
-      email_reply_to = "gitlab-no-reply@example.com";
-      default_projects_features = { builds = false; };
+    databasePasswordFile = "/var/keys/gitlab/db_password";
+    initialRootPasswordFile = "/var/keys/gitlab/root_password";
+    https = true;
+    host = "git.example.com";
+    port = 443;
+    user = "git";
+    group = "git";
+    smtp = {
+      enable = true;
+      address = "localhost";
+      port = 25;
+    };
+    secrets = {
+      dbFile = "/var/keys/gitlab/db";
+      secretFile = "/var/keys/gitlab/secret";
+      otpFile = "/var/keys/gitlab/otp";
+      jwsFile = "/var/keys/gitlab/jws";
+    };
+    extraConfig = {
+      gitlab = {
+        email_from = "gitlab-no-reply@example.com";
+        email_display_name = "Example GitLab";
+        email_reply_to = "gitlab-no-reply@example.com";
+        default_projects_features = { builds = false; };
+      };
     };
   };
-};
+}
 ```
 
 If you're setting up a new GitLab instance, generate new
diff --git a/nixpkgs/nixos/modules/services/misc/mollysocket.nix b/nixpkgs/nixos/modules/services/misc/mollysocket.nix
new file mode 100644
index 000000000000..f40caa4a782e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/mollysocket.nix
@@ -0,0 +1,133 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) getExe mkIf mkOption mkEnableOption optionals types;
+
+  cfg = config.services.mollysocket;
+  configuration = format.generate "mollysocket.conf" cfg.settings;
+  format = pkgs.formats.toml { };
+  package = pkgs.writeShellScriptBin "mollysocket" ''
+    MOLLY_CONF=${configuration} exec ${getExe pkgs.mollysocket} "$@"
+  '';
+in {
+  options.services.mollysocket = {
+    enable = mkEnableOption ''
+      [MollySocket](https://github.com/mollyim/mollysocket) for getting Signal
+      notifications via UnifiedPush
+    '';
+
+    settings = mkOption {
+      default = { };
+      description = ''
+        Configuration for MollySocket. Available options are listed
+        [here](https://github.com/mollyim/mollysocket#configuration).
+      '';
+      type = types.submodule {
+        freeformType = format.type;
+        options = {
+          host = mkOption {
+            default = "127.0.0.1";
+            description = "Listening address of the web server";
+            type = types.str;
+          };
+
+          port = mkOption {
+            default = 8020;
+            description = "Listening port of the web server";
+            type = types.port;
+          };
+
+          allowed_endpoints = mkOption {
+            default = [ "*" ];
+            description = "List of UnifiedPush servers";
+            example = [ "https://ntfy.sh" ];
+            type = with types; listOf str;
+          };
+
+          allowed_uuids = mkOption {
+            default = [ "*" ];
+            description = "UUIDs of Signal accounts that may use this server";
+            example = [ "abcdef-12345-tuxyz-67890" ];
+            type = with types; listOf str;
+          };
+        };
+      };
+    };
+
+    environmentFile = mkOption {
+      default = null;
+      description = ''
+        Environment file (see {manpage}`systemd.exec(5)` "EnvironmentFile="
+        section for the syntax) passed to the service. This option can be
+        used to safely include secrets in the configuration.
+      '';
+      example = "/run/secrets/mollysocket";
+      type = with types; nullOr path;
+    };
+
+    logLevel = mkOption {
+      default = "info";
+      description = "Set the {env}`RUST_LOG` environment variable";
+      example = "debug";
+      type = types.str;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [
+      package
+    ];
+
+    # see https://github.com/mollyim/mollysocket/blob/main/mollysocket.service
+    systemd.services.mollysocket = {
+      description = "MollySocket";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      environment.RUST_LOG = cfg.logLevel;
+      serviceConfig = let
+        capabilities = [ "" ] ++ optionals (cfg.settings.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
+      in {
+        EnvironmentFile = cfg.environmentFile;
+        ExecStart = "${getExe package} server";
+        KillSignal = "SIGINT";
+        Restart = "on-failure";
+        StateDirectory = "mollysocket";
+        TimeoutStopSec = 5;
+        WorkingDirectory = "/var/lib/mollysocket";
+
+        # hardening
+        AmbientCapabilities = capabilities;
+        CapabilityBoundingSet = capabilities;
+        DevicePolicy = "closed";
+        DynamicUser = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@resources" "~@privileged" ];
+        UMask = "0077";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ dotlambda ];
+}
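A hypothetical usage sketch for the new module (not part of the commit), built only from the options and example values declared above; the secrets path is the option's own example:

```nix
{
  services.mollysocket = {
    enable = true;
    settings = {
      host = "127.0.0.1";
      port = 8020;
      allowed_endpoints = [ "https://ntfy.sh" ];
      allowed_uuids = [ "abcdef-12345-tuxyz-67890" ];
    };
    # Keep secrets out of the Nix store (see the environmentFile option).
    environmentFile = "/run/secrets/mollysocket";
  };
}
```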
diff --git a/nixpkgs/nixos/modules/services/misc/paperless.nix b/nixpkgs/nixos/modules/services/misc/paperless.nix
index 9314c4f3848d..9301d1f68725 100644
--- a/nixpkgs/nixos/modules/services/misc/paperless.nix
+++ b/nixpkgs/nixos/modules/services/misc/paperless.nix
@@ -27,6 +27,8 @@ let
       name = "paperless_ngx_nltk_data";
       paths = pkg.nltkData;
     };
+  } // optionalAttrs (cfg.openMPThreadingWorkaround) {
+    OMP_NUM_THREADS = "1";
   } // (lib.mapAttrs (_: s:
     if (lib.isAttrs s || lib.isList s) then builtins.toJSON s
     else if lib.isBool s then lib.boolToString s
@@ -199,6 +201,20 @@ in
     };
 
     package = mkPackageOption pkgs "paperless-ngx" { };
+
+    openMPThreadingWorkaround = mkEnableOption ''
+      a workaround for document classifier timeouts.
+
+      Paperless uses OpenBLAS via scikit-learn for document classification.
+
+      The default is to use threading for OpenMP, but this would cause the
+      document classifier to spin on one core seemingly indefinitely if there
+      is a large number of classes per classification, causing it to
+      effectively never complete due to running into timeouts.
+
+      This sets `OMP_NUM_THREADS` to `1` in order to mitigate the issue. See
+      https://github.com/NixOS/nixpkgs/issues/240591 for more information.
+    '' // mkOption { default = true; };
   };
 
   config = mkIf cfg.enable {
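As a small illustration (not from the commit) of the new option above, which defaults to `true`, a host that prefers multi-threaded OpenMP could opt back out:

```nix
{
  services.paperless.enable = true;
  # Disable the single-thread workaround, accepting the classifier
  # timeout risk described in the option text above.
  services.paperless.openMPThreadingWorkaround = false;
}
```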
diff --git a/nixpkgs/nixos/modules/services/misc/sourcehut/default.md b/nixpkgs/nixos/modules/services/misc/sourcehut/default.md
index 44d58aa0bef3..f965c395038a 100644
--- a/nixpkgs/nixos/modules/services/misc/sourcehut/default.md
+++ b/nixpkgs/nixos/modules/services/misc/sourcehut/default.md
@@ -12,7 +12,7 @@ This NixOS module also provides basic configuration integrating Sourcehut into l
 and `services.postgresql` services.
 
 A very basic configuration may look like this:
-```
+```nix
 { pkgs, ... }:
 let
   fqdn =
@@ -66,9 +66,9 @@ in {
     # Settings to setup what certificates are used for which endpoint.
     virtualHosts = {
       "${fqdn}".enableACME = true;
-      "meta.${fqdn}".useACMEHost = fqdn:
-      "man.${fqdn}".useACMEHost = fqdn:
-      "git.${fqdn}".useACMEHost = fqdn:
+      "meta.${fqdn}".useACMEHost = fqdn;
+      "man.${fqdn}".useACMEHost = fqdn;
+      "git.${fqdn}".useACMEHost = fqdn;
     };
   };
 }
diff --git a/nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix b/nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix
index a8300ecd5233..1b1fde78ad0a 100644
--- a/nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix
+++ b/nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix
@@ -20,7 +20,10 @@ let
   manage = pkgs.writeShellScript "manage" ''
     set -o allexport # Export the following env vars
     ${lib.toShellVars env}
-    exec ${pkg}/bin/tandoor-recipes "$@"
+    eval "$(${config.systemd.package}/bin/systemctl show -pUID,GID,MainPID tandoor-recipes.service)"
+    exec ${pkgs.util-linux}/bin/nsenter \
+      -t $MainPID -m -S $UID -G $GID \
+      ${pkg}/bin/tandoor-recipes "$@"
   '';
 in
 {
@@ -82,6 +85,7 @@ in
         Restart = "on-failure";
 
         User = "tandoor_recipes";
+        Group = "tandoor_recipes";
         DynamicUser = true;
         StateDirectory = "tandoor-recipes";
         WorkingDirectory = "/var/lib/tandoor-recipes";
diff --git a/nixpkgs/nixos/modules/services/misc/weechat.md b/nixpkgs/nixos/modules/services/misc/weechat.md
index 21f41be5b4a0..fb20ebe1e4db 100644
--- a/nixpkgs/nixos/modules/services/misc/weechat.md
+++ b/nixpkgs/nixos/modules/services/misc/weechat.md
@@ -12,7 +12,7 @@ unit which runs the chat client in a detached
 session.
 
 This can be done by enabling the `weechat` service:
-```
+```nix
 { ... }:
 
 {
@@ -30,7 +30,7 @@ allow another user to attach to this session, the
 `screenrc` needs to be tweaked by adding
 [multiuser](https://www.gnu.org/software/screen/manual/html_node/Multiuser.html#Multiuser)
 support:
-```
+```nix
 {
   programs.screen.screenrc = ''
     multiuser on
diff --git a/nixpkgs/nixos/modules/services/monitoring/certspotter.md b/nixpkgs/nixos/modules/services/monitoring/certspotter.md
index 9bf6e1d946a0..e999bfe65ec3 100644
--- a/nixpkgs/nixos/modules/services/monitoring/certspotter.md
+++ b/nixpkgs/nixos/modules/services/monitoring/certspotter.md
@@ -9,17 +9,19 @@ A basic config that notifies you of all certificate changes for your
 domain would look as follows:
 
 ```nix
-services.certspotter = {
-  enable = true;
-  # replace example.org with your domain name
-  watchlist = [ ".example.org" ];
-  emailRecipients = [ "webmaster@example.org" ];
-};
+{
+  services.certspotter = {
+    enable = true;
+    # replace example.org with your domain name
+    watchlist = [ ".example.org" ];
+    emailRecipients = [ "webmaster@example.org" ];
+  };
 
-# Configure an SMTP client
-programs.msmtp.enable = true;
-# Or you can use any other module that provides sendmail, like
-# services.nullmailer, services.opensmtpd, services.postfix
+  # Configure an SMTP client
+  programs.msmtp.enable = true;
+  # Or you can use any other module that provides sendmail, like
+  # services.nullmailer, services.opensmtpd, services.postfix
+}
 ```
 
 In this case, the leading dot in `".example.org"` means that Cert
@@ -59,16 +61,18 @@ For example, you can remove `emailRecipients` and send email
 notifications manually using the following hook:
 
 ```nix
-services.certspotter.hooks = [
-  (pkgs.writeShellScript "certspotter-hook" ''
-    function print_email() {
-      echo "Subject: [certspotter] $SUMMARY"
-      echo "Mime-Version: 1.0"
-      echo "Content-Type: text/plain; charset=US-ASCII"
-      echo
-      cat "$TEXT_FILENAME"
-    }
-    print_email | ${config.services.certspotter.sendmailPath} -i webmaster@example.org
-  '')
-];
+{
+  services.certspotter.hooks = [
+    (pkgs.writeShellScript "certspotter-hook" ''
+      function print_email() {
+        echo "Subject: [certspotter] $SUMMARY"
+        echo "Mime-Version: 1.0"
+        echo "Content-Type: text/plain; charset=US-ASCII"
+        echo
+        cat "$TEXT_FILENAME"
+      }
+      print_email | ${config.services.certspotter.sendmailPath} -i webmaster@example.org
+    '')
+  ];
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/monitoring/goss.md b/nixpkgs/nixos/modules/services/monitoring/goss.md
index 1e636aa3bdf3..bf91d42011fa 100644
--- a/nixpkgs/nixos/modules/services/monitoring/goss.md
+++ b/nixpkgs/nixos/modules/services/monitoring/goss.md
@@ -7,7 +7,7 @@ for validating a server's configuration.
 
 A minimal configuration looks like this:
 
-```
+```nix
 {
   services.goss = {
     enable = true;
diff --git a/nixpkgs/nixos/modules/services/monitoring/parsedmarc.md b/nixpkgs/nixos/modules/services/monitoring/parsedmarc.md
index eac07e0cc9fe..765846bbbaf3 100644
--- a/nixpkgs/nixos/modules/services/monitoring/parsedmarc.md
+++ b/nixpkgs/nixos/modules/services/monitoring/parsedmarc.md
@@ -11,15 +11,17 @@ email address and saves them to a local Elasticsearch instance looks
 like this:
 
 ```nix
-services.parsedmarc = {
-  enable = true;
-  settings.imap = {
-    host = "imap.example.com";
-    user = "alice@example.com";
-    password = "/path/to/imap_password_file";
+{
+  services.parsedmarc = {
+    enable = true;
+    settings.imap = {
+      host = "imap.example.com";
+      user = "alice@example.com";
+      password = "/path/to/imap_password_file";
+    };
+    provision.geoIp = false; # Not recommended!
   };
-  provision.geoIp = false; # Not recommended!
-};
+}
 ```
 
 Note that GeoIP provisioning is disabled in the example for
@@ -37,16 +39,18 @@ configured in the domain's dmarc policy is
 `dmarc@monitoring.example.com`.
 
 ```nix
-services.parsedmarc = {
-  enable = true;
-  provision = {
-    localMail = {
-      enable = true;
-      hostname = monitoring.example.com;
+{
+  services.parsedmarc = {
+    enable = true;
+    provision = {
+      localMail = {
+        enable = true;
+        hostname = monitoring.example.com;
+      };
+      geoIp = false; # Not recommended!
     };
-    geoIp = false; # Not recommended!
   };
-};
+}
 ```
 
 ## Grafana and GeoIP {#module-services-parsedmarc-grafana-geoip}
@@ -58,55 +62,57 @@ is automatically added as a Grafana datasource, and the dashboard is
 added to Grafana as well.
 
 ```nix
-services.parsedmarc = {
-  enable = true;
-  provision = {
-    localMail = {
-      enable = true;
-      hostname = url;
-    };
-    grafana = {
-      datasource = true;
-      dashboard = true;
+{
+  services.parsedmarc = {
+    enable = true;
+    provision = {
+      localMail = {
+        enable = true;
+        hostname = url;
+      };
+      grafana = {
+        datasource = true;
+        dashboard = true;
+      };
     };
   };
-};
 
-# Not required, but recommended for full functionality
-services.geoipupdate = {
-  settings = {
-    AccountID = 000000;
-    LicenseKey = "/path/to/license_key_file";
+  # Not required, but recommended for full functionality
+  services.geoipupdate = {
+    settings = {
+      AccountID = 000000;
+      LicenseKey = "/path/to/license_key_file";
+    };
   };
-};
 
-services.grafana = {
-  enable = true;
-  addr = "0.0.0.0";
-  domain = url;
-  rootUrl = "https://" + url;
-  protocol = "socket";
-  security = {
-    adminUser = "admin";
-    adminPasswordFile = "/path/to/admin_password_file";
-    secretKeyFile = "/path/to/secret_key_file";
+  services.grafana = {
+    enable = true;
+    addr = "0.0.0.0";
+    domain = url;
+    rootUrl = "https://" + url;
+    protocol = "socket";
+    security = {
+      adminUser = "admin";
+      adminPasswordFile = "/path/to/admin_password_file";
+      secretKeyFile = "/path/to/secret_key_file";
+    };
   };
-};
 
-services.nginx = {
-  enable = true;
-  recommendedTlsSettings = true;
-  recommendedOptimisation = true;
-  recommendedGzipSettings = true;
-  recommendedProxySettings = true;
-  upstreams.grafana.servers."unix:/${config.services.grafana.socket}" = {};
-  virtualHosts.${url} = {
-    root = config.services.grafana.staticRootPath;
-    enableACME = true;
-    forceSSL = true;
-    locations."/".tryFiles = "$uri @grafana";
-    locations."@grafana".proxyPass = "http://grafana";
+  services.nginx = {
+    enable = true;
+    recommendedTlsSettings = true;
+    recommendedOptimisation = true;
+    recommendedGzipSettings = true;
+    recommendedProxySettings = true;
+    upstreams.grafana.servers."unix:/${config.services.grafana.socket}" = {};
+    virtualHosts.${url} = {
+      root = config.services.grafana.staticRootPath;
+      enableACME = true;
+      forceSSL = true;
+      locations."/".tryFiles = "$uri @grafana";
+      locations."@grafana".proxyPass = "http://grafana";
+    };
   };
-};
-users.users.nginx.extraGroups = [ "grafana" ];
+  users.users.nginx.extraGroups = [ "grafana" ];
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md
index 34fadecadc74..b344534f6aee 100644
--- a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md
@@ -9,7 +9,8 @@ One of the most common exporters is the
 [node exporter](https://github.com/prometheus/node_exporter),
 which provides hardware and OS metrics from the host it's
 running on. The exporter could be configured as follows:
-```
+```nix
+{
   services.prometheus.exporters.node = {
     enable = true;
     port = 9100;
@@ -23,6 +24,7 @@ running on. The exporter could be configured as follows:
     openFirewall = true;
     firewallFilter = "-i br0 -p tcp -m tcp --dport 9100";
   };
+}
 ```
 It should now serve all metrics from the collectors that are explicitly
 enabled and the ones that are
@@ -35,7 +37,8 @@ configuration see `man configuration.nix` or search through
 the [available options](https://nixos.org/nixos/options.html#prometheus.exporters).
 
 Prometheus can now be configured to consume the metrics produced by the exporter:
-```
+```nix
+{
     services.prometheus = {
       # ...
 
@@ -49,7 +52,8 @@ Prometheus can now be configured to consume the metrics produced by the exporter
       ];
 
       # ...
-    }
+    };
+}
 ```
 
 ## Adding a new exporter {#module-services-prometheus-exporters-new-exporter}
@@ -75,7 +79,7 @@ example:
     `nixos/modules/services/monitoring/prometheus/exporters/`
     directory, which will be called postfix.nix and contains all exporter
     specific options and configuration:
-    ```
+    ```nix
     # nixpkgs/nixos/modules/services/prometheus/exporters/postfix.nix
     { config, lib, pkgs, options }:
 
@@ -148,7 +152,7 @@ example:
 Should an exporter option change at some point, it is possible to add
 information about the change to the exporter definition similar to
 `nixpkgs/nixos/modules/rename.nix`:
-```
+```nix
 { config, lib, pkgs, options }:
 
 with lib;
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md b/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md
index 8d8486507b77..626d69df84a5 100644
--- a/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md
+++ b/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md
@@ -8,7 +8,7 @@ replication tool for SQLite.
 The Litestream service is managed by a dedicated user named `litestream`,
 which needs access to the database file. Here's an example config which gives
 required permissions to access [grafana database](#opt-services.grafana.settings.database.path):
-```
+```nix
 { pkgs, ... }:
 {
   users.users.litestream.extraGroups = [ "grafana" ];
diff --git a/nixpkgs/nixos/modules/services/networking/dnsproxy.nix b/nixpkgs/nixos/modules/services/networking/dnsproxy.nix
new file mode 100644
index 000000000000..f0be74d7591f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/dnsproxy.nix
@@ -0,0 +1,106 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    escapeShellArgs
+    getExe
+    lists
+    literalExpression
+    maintainers
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOption
+    types;
+
+  cfg = config.services.dnsproxy;
+
+  yaml = pkgs.formats.yaml { };
+  configFile = yaml.generate "config.yaml" cfg.settings;
+
+  finalFlags = (lists.optional (cfg.settings != { }) "--config-path=${configFile}") ++ cfg.flags;
+in
+{
+
+  options.services.dnsproxy = {
+
+    enable = mkEnableOption (lib.mdDoc "dnsproxy");
+
+    package = mkPackageOption pkgs "dnsproxy" { };
+
+    settings = mkOption {
+      type = yaml.type;
+      default = { };
+      example = literalExpression ''
+        {
+          bootstrap = [
+            "8.8.8.8:53"
+          ];
+          listen-addrs = [
+            "0.0.0.0"
+          ];
+          listen-ports = [
+            53
+          ];
+          upstream = [
+            "1.1.1.1:53"
+          ];
+        }
+      '';
+      description = mdDoc ''
+        Contents of the `config.yaml` config file.
+        The `--config-path` argument will only be passed if this set is not empty.
+
+        See <https://github.com/AdguardTeam/dnsproxy/blob/master/config.yaml.dist>.
+      '';
+    };
+
+    flags = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--upstream=1.1.1.1:53" ];
+      description = lib.mdDoc ''
+        A list of extra command-line flags to pass to dnsproxy. For details on the
+        available options, see <https://github.com/AdguardTeam/dnsproxy#usage>.
+        Keep in mind that options passed through command-line flags override
+        config options.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.dnsproxy = {
+      description = "Simple DNS proxy with DoH, DoT, DoQ and DNSCrypt support";
+      after = [ "network.target" "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${getExe cfg.package} ${escapeShellArgs finalFlags}";
+        Restart = "always";
+        RestartSec = 10;
+        DynamicUser = true;
+
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        ProtectClock = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [ "@system-service" "~@privileged @resources" ];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ diogotcorreia ];
+
+}
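For reference, a minimal sketch of how the new `services.dnsproxy` module is intended to be used; the settings values are taken directly from the option example above, so adjust them to your own resolvers:

```nix
{
  services.dnsproxy = {
    enable = true;
    # written to config.yaml and passed to dnsproxy via --config-path
    settings = {
      bootstrap = [ "8.8.8.8:53" ];
      listen-addrs = [ "0.0.0.0" ];
      listen-ports = [ 53 ];
      upstream = [ "1.1.1.1:53" ];
    };
  };
}
```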
diff --git a/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md b/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md
index 4d8777d204bb..f6b515e67f15 100644
--- a/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md
+++ b/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md
@@ -7,19 +7,21 @@ A storage server for Firefox Sync that you can easily host yourself.
 The absolute minimal configuration for the sync server looks like this:
 
 ```nix
-services.mysql.package = pkgs.mariadb;
-
-services.firefox-syncserver = {
-  enable = true;
-  secrets = builtins.toFile "sync-secrets" ''
-    SYNC_MASTER_SECRET=this-secret-is-actually-leaked-to-/nix/store
-  '';
-  singleNode = {
+{
+  services.mysql.package = pkgs.mariadb;
+
+  services.firefox-syncserver = {
     enable = true;
-    hostname = "localhost";
-    url = "http://localhost:5000";
+    secrets = builtins.toFile "sync-secrets" ''
+      SYNC_MASTER_SECRET=this-secret-is-actually-leaked-to-/nix/store
+    '';
+    singleNode = {
+      enable = true;
+      hostname = "localhost";
+      url = "http://localhost:5000";
+    };
   };
-};
+}
 ```
 
 This will start a sync server that is only accessible locally. Once the service is
diff --git a/nixpkgs/nixos/modules/services/networking/microsocks.nix b/nixpkgs/nixos/modules/services/networking/microsocks.nix
new file mode 100644
index 000000000000..be79a8495636
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/microsocks.nix
@@ -0,0 +1,146 @@
+{ config,
+  lib,
+  pkgs,
+  ...
+}:
+
+let
+  cfg = config.services.microsocks;
+
+  cmd =
+    if cfg.execWrapper != null
+    then "${cfg.execWrapper} ${cfg.package}/bin/microsocks"
+    else "${cfg.package}/bin/microsocks";
+  args =
+    [ "-i" cfg.ip "-p" (toString cfg.port) ]
+    ++ lib.optionals (cfg.authOnce) [ "-1" ]
+    ++ lib.optionals (cfg.disableLogging) [ "-q" ]
+    ++ lib.optionals (cfg.outgoingBindIp != null) [ "-b" cfg.outgoingBindIp ]
+    ++ lib.optionals (cfg.authUsername != null) [ "-u" cfg.authUsername ];
+in {
+  options.services.microsocks = {
+    enable = lib.mkEnableOption (lib.mdDoc "Tiny, portable SOCKS5 server with very moderate resource usage");
+    user = lib.mkOption {
+      default = "microsocks";
+      description = lib.mdDoc "User microsocks runs as.";
+      type = lib.types.str;
+    };
+    group = lib.mkOption {
+      default = "microsocks";
+      description = lib.mdDoc "Group microsocks runs as.";
+      type = lib.types.str;
+    };
+    package = lib.mkPackageOption pkgs "microsocks" {};
+    ip = lib.mkOption {
+      type = lib.types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        IP on which microsocks should listen. Defaults to 127.0.0.1 for
+        security reasons.
+      '';
+    };
+    port = lib.mkOption {
+      type = lib.types.port;
+      default = 1080;
+      description = lib.mdDoc "Port on which microsocks should listen.";
+    };
+    disableLogging = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc "If true, microsocks will not log any messages to stdout/stderr.";
+    };
+    authOnce = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If true, once a specific IP address has authenticated successfully with
+        user/password, it is added to a whitelist and may use the proxy without
+        further authentication.
+      '';
+    };
+    outgoingBindIp = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = lib.mdDoc "Specifies which ip outgoing connections are bound to";
+    };
+    authUsername = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      example = "alice";
+      description = lib.mdDoc "Optional username to use for authentication.";
+    };
+    authPasswordFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      example = "/run/secrets/microsocks-password";
+      description = lib.mdDoc "Path to a file containing the password for authentication.";
+    };
+    execWrapper = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      example = ''
+        ''${pkgs.mullvad-vpn}/bin/mullvad-exclude
+      '';
+      description = lib.mdDoc ''
+        An optional command to prepend to the microsocks command (such as proxychains, or a VPN exclude command).
+      '';
+    };
+  };
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.authUsername != null) == (cfg.authPasswordFile != null);
+        message = "Need to set both authUsername and authPasswordFile for microsocks";
+      }
+    ];
+    users = {
+      users = lib.mkIf (cfg.user == "microsocks") {
+        microsocks = {
+          group = cfg.group;
+          isSystemUser = true;
+        };
+      };
+      groups = lib.mkIf (cfg.group == "microsocks") {
+        microsocks = {};
+      };
+    };
+    systemd.services.microsocks = {
+      enable = true;
+      description = "a tiny socks server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Restart = "on-failure";
+        RestartSec = 10;
+        LoadCredential = lib.optionalString (cfg.authPasswordFile != null) "MICROSOCKS_PASSWORD_FILE:${cfg.authPasswordFile}";
+        MemoryDenyWriteExecute = true;
+        SystemCallArchitectures = "native";
+        PrivateTmp = true;
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        PrivateDevices = true;
+        RestrictSUIDSGID = true;
+        RestrictNamespaces = [
+          "cgroup"
+          "ipc"
+          "pid"
+          "user"
+          "uts"
+        ];
+      };
+      script =
+        if cfg.authPasswordFile != null
+        then ''
+          PASSWORD=$(cat "$CREDENTIALS_DIRECTORY/MICROSOCKS_PASSWORD_FILE")
+          ${cmd} ${lib.escapeShellArgs args} -P "$PASSWORD"
+        ''
+        else ''
+          ${cmd} ${lib.escapeShellArgs args}
+        '';
+    };
+  };
+}
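A hedged sketch of the new `services.microsocks` options composed together; the listen address is an illustrative assumption, while the username and password file path mirror the option examples above:

```nix
{
  services.microsocks = {
    enable = true;
    ip = "0.0.0.0";  # assumption: listen on all interfaces instead of the 127.0.0.1 default
    port = 1080;
    authUsername = "alice";
    # the module asserts that authUsername and authPasswordFile are set together
    authPasswordFile = "/run/secrets/microsocks-password";
    authOnce = true;
  };
}
```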
diff --git a/nixpkgs/nixos/modules/services/networking/mosquitto.md b/nixpkgs/nixos/modules/services/networking/mosquitto.md
index 5cdb598151e5..66b3ad6cfa8f 100644
--- a/nixpkgs/nixos/modules/services/networking/mosquitto.md
+++ b/nixpkgs/nixos/modules/services/networking/mosquitto.md
@@ -7,14 +7,16 @@ Mosquitto is a MQTT broker often used for IoT or home automation data transport.
 A minimal configuration for Mosquitto is
 
 ```nix
-services.mosquitto = {
-  enable = true;
-  listeners = [ {
-    acl = [ "pattern readwrite #" ];
-    omitPasswordAuth = true;
-    settings.allow_anonymous = true;
-  } ];
-};
+{
+  services.mosquitto = {
+    enable = true;
+    listeners = [ {
+      acl = [ "pattern readwrite #" ];
+      omitPasswordAuth = true;
+      settings.allow_anonymous = true;
+    } ];
+  };
+}
 ```
 
 This will start a broker on port 1883, listening on all interfaces of the machine, allowing
@@ -25,37 +27,42 @@ full read access to a user `monitor` and restricted write access to a user `serv
 like
 
 ```nix
-services.mosquitto = {
-  enable = true;
-  listeners = [ {
-    users = {
-      monitor = {
-        acl = [ "read #" ];
-        password = "monitor";
+{
+  services.mosquitto = {
+    enable = true;
+    listeners = [ {
+      users = {
+        monitor = {
+          acl = [ "read #" ];
+          password = "monitor";
+        };
+        service = {
+          acl = [ "write service/#" ];
+          password = "service";
+        };
       };
-      service = {
-        acl = [ "write service/#" ];
-        password = "service";
-      };
-    };
-  } ];
-};
+    } ];
+  };
+}
 ```
 
 TLS authentication is configured by setting TLS-related options of the listener:
 
 ```nix
-services.mosquitto = {
-  enable = true;
-  listeners = [ {
-    port = 8883; # port change is not required, but helpful to avoid mistakes
-    # ...
-    settings = {
-      cafile = "/path/to/mqtt.ca.pem";
-      certfile = "/path/to/mqtt.pem";
-      keyfile = "/path/to/mqtt.key";
-    };
-  } ];
+{
+  services.mosquitto = {
+    enable = true;
+    listeners = [ {
+      port = 8883; # port change is not required, but helpful to avoid mistakes
+      # ...
+      settings = {
+        cafile = "/path/to/mqtt.ca.pem";
+        certfile = "/path/to/mqtt.pem";
+        keyfile = "/path/to/mqtt.key";
+      };
+    } ];
+  };
+}
 ```
 
 ## Configuration {#module-services-mosquitto-config}
diff --git a/nixpkgs/nixos/modules/services/networking/netbird.md b/nixpkgs/nixos/modules/services/networking/netbird.md
index a326207becc8..e1f6753cbd30 100644
--- a/nixpkgs/nixos/modules/services/networking/netbird.md
+++ b/nixpkgs/nixos/modules/services/networking/netbird.md
@@ -5,7 +5,9 @@
 The absolute minimal configuration for the netbird daemon looks like this:
 
 ```nix
-services.netbird.enable = true;
+{
+  services.netbird.enable = true;
+}
 ```
 
 This will set up a netbird service listening on the port `51820` associated to the
@@ -14,7 +16,9 @@ This will set up a netbird service listening on the port `51820` associated to t
 It is strictly equivalent to setting:
 
 ```nix
-services.netbird.tunnels.wt0.stateDir = "netbird";
+{
+  services.netbird.tunnels.wt0.stateDir = "netbird";
+}
 ```
 
 The `enable` option is mainly kept for backward compatibility, as defining netbird
@@ -29,11 +33,13 @@ The following configuration will start a netbird daemon using the interface `wt1
 the port 51830. Its configuration file will then be located at `/var/lib/netbird-wt1/config.json`.
 
 ```nix
-services.netbird.tunnels = {
-  wt1 = {
-    port = 51830;
+{
+  services.netbird.tunnels = {
+    wt1 = {
+      port = 51830;
+    };
   };
-};
+}
 ```
 
 To interact with it, you will need to specify the correct daemon address:
@@ -48,9 +54,11 @@ It is also possible to overwrite default options passed to the service, for
 example:
 
 ```nix
-services.netbird.tunnels.wt1.environment = {
-  NB_DAEMON_ADDR = "unix:///var/run/toto.sock"
-};
+{
+  services.netbird.tunnels.wt1.environment = {
+    NB_DAEMON_ADDR = "unix:///var/run/toto.sock";
+  };
+}
 ```
 
 This will set the socket to interact with the netbird service to `/var/run/toto.sock`.
diff --git a/nixpkgs/nixos/modules/services/networking/pleroma.md b/nixpkgs/nixos/modules/services/networking/pleroma.md
index 7c499e1c616c..c2313fd63e6a 100644
--- a/nixpkgs/nixos/modules/services/networking/pleroma.md
+++ b/nixpkgs/nixos/modules/services/networking/pleroma.md
@@ -17,11 +17,13 @@ The `config.exs` file can be further customized following the instructions on th
 ## Initializing the database {#module-services-pleroma-initialize-db}
 
 First, the Postgresql service must be enabled in the NixOS configuration
-```
-services.postgresql = {
-  enable = true;
-  package = pkgs.postgresql_13;
-};
+```nix
+{
+  services.postgresql = {
+    enable = true;
+    package = pkgs.postgresql_13;
+  };
+}
 ```
 and activated with the usual
 ```ShellSession
@@ -38,43 +40,45 @@ $ sudo -u postgres psql -f setup.psql
 In this section we will enable the Pleroma service only locally, so its configurations can be improved incrementally.
 
 This is an example of configuration, where [](#opt-services.pleroma.configs) option contains the content of the file `config.exs`, generated [in the first section](#module-services-pleroma-generate-config), but with the secrets (database password, endpoint secret key, salts, etc.) removed. Removing secrets is important, because otherwise they will be stored publicly in the Nix store.
-```
-services.pleroma = {
-  enable = true;
-  secretConfigFile = "/var/lib/pleroma/secrets.exs";
-  configs = [
-    ''
-    import Config
-
-    config :pleroma, Pleroma.Web.Endpoint,
-      url: [host: "pleroma.example.net", scheme: "https", port: 443],
-      http: [ip: {127, 0, 0, 1}, port: 4000]
-
-    config :pleroma, :instance,
-      name: "Test",
-      email: "admin@example.net",
-      notify_email: "admin@example.net",
-      limit: 5000,
-      registrations_open: true
-
-    config :pleroma, :media_proxy,
-      enabled: false,
-      redirect_on_failure: true
-
-    config :pleroma, Pleroma.Repo,
-      adapter: Ecto.Adapters.Postgres,
-      username: "pleroma",
-      database: "pleroma",
-      hostname: "localhost"
-
-    # Configure web push notifications
-    config :web_push_encryption, :vapid_details,
-      subject: "mailto:admin@example.net"
-
-    # ... TO CONTINUE ...
-    ''
-  ];
-};
+```nix
+{
+  services.pleroma = {
+    enable = true;
+    secretConfigFile = "/var/lib/pleroma/secrets.exs";
+    configs = [
+      ''
+      import Config
+
+      config :pleroma, Pleroma.Web.Endpoint,
+        url: [host: "pleroma.example.net", scheme: "https", port: 443],
+        http: [ip: {127, 0, 0, 1}, port: 4000]
+
+      config :pleroma, :instance,
+        name: "Test",
+        email: "admin@example.net",
+        notify_email: "admin@example.net",
+        limit: 5000,
+        registrations_open: true
+
+      config :pleroma, :media_proxy,
+        enabled: false,
+        redirect_on_failure: true
+
+      config :pleroma, Pleroma.Repo,
+        adapter: Ecto.Adapters.Postgres,
+        username: "pleroma",
+        database: "pleroma",
+        hostname: "localhost"
+
+      # Configure web push notifications
+      config :web_push_encryption, :vapid_details,
+        subject: "mailto:admin@example.net"
+
+      # ... TO CONTINUE ...
+      ''
+    ];
+  };
+}
 ```
 
 Secrets must be moved into a file pointed to by [](#opt-services.pleroma.secretConfigFile), in our case `/var/lib/pleroma/secrets.exs`. This file can be created by copying the previously generated `config.exs` file and then removing all the settings except the secrets. This is an example
@@ -121,60 +125,62 @@ $ pleroma_ctl user new <nickname> <email>  --admin --moderator --password <passw
 
 In this configuration, Pleroma listens only on the local port 4000. Nginx can be configured as a reverse proxy to forward requests from public ports to the Pleroma service. This is an example configuration, using
 [Let's Encrypt](https://letsencrypt.org/) for the TLS certificates
-```
-security.acme = {
-  email = "root@example.net";
-  acceptTerms = true;
-};
-
-services.nginx = {
-  enable = true;
-  addSSL = true;
-
-  recommendedTlsSettings = true;
-  recommendedOptimisation = true;
-  recommendedGzipSettings = true;
-
-  recommendedProxySettings = false;
-  # NOTE: if enabled, the NixOS proxy optimizations will override the Pleroma
-  # specific settings, and they will enter in conflict.
-
-  virtualHosts = {
-    "pleroma.example.net" = {
-      http2 = true;
-      enableACME = true;
-      forceSSL = true;
-
-      locations."/" = {
-        proxyPass = "http://127.0.0.1:4000";
-
-        extraConfig = ''
-          etag on;
-          gzip on;
-
-          add_header 'Access-Control-Allow-Origin' '*' always;
-          add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always;
-          add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always;
-          add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always;
-          if ($request_method = OPTIONS) {
-            return 204;
-          }
-          add_header X-XSS-Protection "1; mode=block";
-          add_header X-Permitted-Cross-Domain-Policies none;
-          add_header X-Frame-Options DENY;
-          add_header X-Content-Type-Options nosniff;
-          add_header Referrer-Policy same-origin;
-          add_header X-Download-Options noopen;
-          proxy_http_version 1.1;
-          proxy_set_header Upgrade $http_upgrade;
-          proxy_set_header Connection "upgrade";
-          proxy_set_header Host $host;
-
-          client_max_body_size 16m;
-          # NOTE: increase if users need to upload very big files
-        '';
+```nix
+{
+  security.acme = {
+    email = "root@example.net";
+    acceptTerms = true;
+  };
+
+  services.nginx = {
+    enable = true;
+    addSSL = true;
+
+    recommendedTlsSettings = true;
+    recommendedOptimisation = true;
+    recommendedGzipSettings = true;
+
+    recommendedProxySettings = false;
+    # NOTE: if enabled, the NixOS proxy optimizations will override the Pleroma
+    # specific settings, and they will enter in conflict.
+
+    virtualHosts = {
+      "pleroma.example.net" = {
+        http2 = true;
+        enableACME = true;
+        forceSSL = true;
+
+        locations."/" = {
+          proxyPass = "http://127.0.0.1:4000";
+
+          extraConfig = ''
+            etag on;
+            gzip on;
+
+            add_header 'Access-Control-Allow-Origin' '*' always;
+            add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always;
+            add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always;
+            add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always;
+            if ($request_method = OPTIONS) {
+              return 204;
+            }
+            add_header X-XSS-Protection "1; mode=block";
+            add_header X-Permitted-Cross-Domain-Policies none;
+            add_header X-Frame-Options DENY;
+            add_header X-Content-Type-Options nosniff;
+            add_header Referrer-Policy same-origin;
+            add_header X-Download-Options noopen;
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "upgrade";
+            proxy_set_header Host $host;
+
+            client_max_body_size 16m;
+            # NOTE: increase if users need to upload very big files
+          '';
+        };
       };
     };
   };
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/networking/prosody.md b/nixpkgs/nixos/modules/services/networking/prosody.md
index 2da2c242a98b..d6eee4e29f0a 100644
--- a/nixpkgs/nixos/modules/services/networking/prosody.md
+++ b/nixpkgs/nixos/modules/services/networking/prosody.md
@@ -25,25 +25,27 @@ A good configuration to start with, including a
 [Multi User Chat (MUC)](https://xmpp.org/extensions/xep-0045.html)
 endpoint as well as a [HTTP File Upload](https://xmpp.org/extensions/xep-0363.html)
 endpoint will look like this:
-```
-services.prosody = {
-  enable = true;
-  admins = [ "root@example.org" ];
-  ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
-  ssl.key = "/var/lib/acme/example.org/key.pem";
-  virtualHosts."example.org" = {
-      enabled = true;
-      domain = "example.org";
-      ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
-      ssl.key = "/var/lib/acme/example.org/key.pem";
-  };
-  muc = [ {
-      domain = "conference.example.org";
-  } ];
-  uploadHttp = {
-      domain = "upload.example.org";
+```nix
+{
+  services.prosody = {
+    enable = true;
+    admins = [ "root@example.org" ];
+    ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
+    ssl.key = "/var/lib/acme/example.org/key.pem";
+    virtualHosts."example.org" = {
+        enabled = true;
+        domain = "example.org";
+        ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
+        ssl.key = "/var/lib/acme/example.org/key.pem";
+    };
+    muc = [ {
+        domain = "conference.example.org";
+    } ];
+    uploadHttp = {
+        domain = "upload.example.org";
+    };
   };
-};
+}
 ```
 
 ## Let's Encrypt Configuration {#module-services-prosody-letsencrypt}
@@ -57,16 +59,18 @@ certificate by leveraging the ACME
 
 Provided the setup detailed in the previous section, you'll need the following acme configuration to generate
 a TLS certificate for the three endpoints:
-```
-security.acme = {
-  email = "root@example.org";
-  acceptTerms = true;
-  certs = {
-    "example.org" = {
-      webroot = "/var/www/example.org";
-      email = "root@example.org";
-      extraDomainNames = [ "conference.example.org" "upload.example.org" ];
+```nix
+{
+  security.acme = {
+    email = "root@example.org";
+    acceptTerms = true;
+    certs = {
+      "example.org" = {
+        webroot = "/var/www/example.org";
+        email = "root@example.org";
+        extraDomainNames = [ "conference.example.org" "upload.example.org" ];
+      };
     };
   };
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-control.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-control.nix
new file mode 100644
index 000000000000..fdf3a9ba3cc1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-control.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-control;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "cs";
+      config_dir = "/etc/scion";
+      reconnect_to_dispatcher = true;
+    };
+    beacon_db = {
+      connection = "/var/lib/scion-control/control.beacon.db";
+    };
+    path_db = {
+      connection = "/var/lib/scion-control/control.path.db";
+    };
+    trust_db = {
+      connection = "/var/lib/scion-control/control.trust.db";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-control.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-control = {
+    enable = mkEnableOption (lib.mdDoc "the scion-control service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          path_db = {
+            connection = "/var/lib/scion-control/control.path.db";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-control configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-control = {
+      description = "SCION Control Service";
+      after = [ "network-online.target" "scion-dispatcher.service" ];
+      wants = [ "network-online.target" "scion-dispatcher.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        Group = if (config.services.scion.scion-dispatcher.enable == true) then "scion" else null;
+        ExecStart = "${pkgs.scion}/bin/scion-control --config ${configFile}";
+        DynamicUser = true;
+        Restart = "on-failure";
+        BindPaths = [ "/dev/shm:/run/shm" ];
+        StateDirectory = "scion-control";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix
new file mode 100644
index 000000000000..0bcc18771fc3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-daemon;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "sd";
+      config_dir = "/etc/scion";
+      reconnect_to_dispatcher = true;
+    };
+    path_db = {
+      connection = "/var/lib/scion-daemon/sd.path.db";
+    };
+    trust_db = {
+      connection = "/var/lib/scion-daemon/sd.trust.db";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-daemon.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-daemon = {
+    enable = mkEnableOption (lib.mdDoc "the scion-daemon service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          path_db = {
+            connection = "/var/lib/scion-daemon/sd.path.db";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-daemon configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-daemon = {
+      description = "SCION Daemon";
+      after = [ "network-online.target" "scion-dispatcher.service" ];
+      wants = [ "network-online.target" "scion-dispatcher.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.scion}/bin/scion-daemon --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        StateDirectory = "scion-daemon";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix
new file mode 100644
index 000000000000..bab1ec0a989b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-dispatcher;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    dispatcher = {
+      id = "dispatcher";
+      socket_file_mode = "0770";
+      application_socket = "/dev/shm/dispatcher/default.sock";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-dispatcher.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-dispatcher = {
+    enable = mkEnableOption (lib.mdDoc "the scion-dispatcher service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          dispatcher = {
+            id = "dispatcher";
+            socket_file_mode = "0770";
+            application_socket = "/dev/shm/dispatcher/default.sock";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-dispatcher configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    # Needed for group ownership of the dispatcher socket
+    users.groups.scion = {};
+
+    # SCION programs hardcode the dispatcher path under /run/shm, which is not
+    # configurable at runtime. Upstream plans to obsolete the dispatcher in
+    # favor of an SCMP daemon, at which point this workaround can be removed.
+    system.activationScripts.scion-dispatcher = ''
+      ln -sf /dev/shm /run/shm
+    '';
+
+    systemd.services.scion-dispatcher = {
+      description = "SCION Dispatcher";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        Group = "scion";
+        DynamicUser = true;
+        BindPaths = [ "/dev/shm:/run/shm" ];
+        ExecStartPre = "${pkgs.coreutils}/bin/rm -rf /run/shm/dispatcher";
+        ExecStart = "${pkgs.scion}/bin/scion-dispatcher --config ${configFile}";
+        Restart = "on-failure";
+        StateDirectory = "scion-dispatcher";
+      };
+    };
+  };
+}
+
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-router.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-router.nix
new file mode 100644
index 000000000000..cbe83c6dbf8d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-router.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-router;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "br";
+      config_dir = "/etc/scion";
+    };
+  };
+  configFile = toml.generate "scion-router.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-router = {
+    enable = mkEnableOption (lib.mdDoc "the scion-router service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          general.id = "br";
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-router configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-router = {
+      description = "SCION Router";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.scion}/bin/scion-router --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        StateDirectory = "scion-router";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion.nix b/nixpkgs/nixos/modules/services/networking/scion/scion.nix
new file mode 100644
index 000000000000..704f942b5d9e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion.nix
@@ -0,0 +1,39 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion;
+in
+{
+  options.services.scion = {
+    enable = mkEnableOption (lib.mdDoc "all of the scion components and services");
+    bypassBootstrapWarning = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        bypass Nix warning about SCION PKI bootstrapping
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    services.scion = {
+      scion-dispatcher.enable = true;
+      scion-daemon.enable = true;
+      scion-router.enable = true;
+      scion-control.enable = true;
+    };
+    assertions = [
+      { assertion = cfg.bypassBootstrapWarning == true;
+        message = ''
+          SCION is a routing protocol and requires bootstrapping with a manual, imperative key signing ceremony. You may want to join an existing Isolation Domain (ISD) such as scionlab.org, or bootstrap your own. If you have completed and configured the public key infrastructure for SCION and are sure this process is complete, then add the following to your configuration:
+
+          services.scion.bypassBootstrapWarning = true;
+
+          Refer to docs.scion.org for more information.
+        '';
+      }
+    ];
+  };
+}
+
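Taken together, enabling the umbrella option pulls in the dispatcher, daemon, router and control services; the assertion above then requires acknowledging the PKI bootstrap, for example:

```nix
{
  services.scion = {
    enable = true;
    # only set this once the SCION key signing ceremony has been completed
    bypassBootstrapWarning = true;
  };
}
```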
diff --git a/nixpkgs/nixos/modules/services/networking/yggdrasil.md b/nixpkgs/nixos/modules/services/networking/yggdrasil.md
index bbaea5bc74aa..7b899f9d6ddb 100644
--- a/nixpkgs/nixos/modules/services/networking/yggdrasil.md
+++ b/nixpkgs/nixos/modules/services/networking/yggdrasil.md
@@ -12,7 +12,7 @@ self-arranging IPv6 network.
 ### Simple ephemeral node {#module-services-networking-yggdrasil-configuration-simple}
 
 An annotated example of a simple configuration:
-```
+```nix
 {
   services.yggdrasil = {
     enable = true;
@@ -39,7 +39,7 @@ An annotated example of a simple configuration:
 ### Persistent node with prefix {#module-services-networking-yggdrasil-configuration-prefix}
 
 A node with a fixed address that announces a prefix:
-```
+```nix
 let
   address = "210:5217:69c0:9afc:1b95:b9f:8718:c3d2";
   prefix = "310:5217:69c0:9afc";
@@ -90,7 +90,7 @@ in {
 
 A NixOS container attached to the Yggdrasil network via a node running on the
 host:
-```
+```nix
 let
   yggPrefix64 = "310:5217:69c0:9afc";
     # Again, taken from the output of "yggdrasilctl getself".
diff --git a/nixpkgs/nixos/modules/services/search/meilisearch.md b/nixpkgs/nixos/modules/services/search/meilisearch.md
index 299f56bf8293..b9f65861b1d1 100644
--- a/nixpkgs/nixos/modules/services/search/meilisearch.md
+++ b/nixpkgs/nixos/modules/services/search/meilisearch.md
@@ -7,7 +7,9 @@ Meilisearch is a lightweight, fast and powerful search engine. Think elastic sea
 The minimum configuration to start Meilisearch is
 
 ```nix
-services.meilisearch.enable = true;
+{
+  services.meilisearch.enable = true;
+}
 ```
 
 This will start the HTTP server included with Meilisearch on port 7700.
diff --git a/nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix b/nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix
index b8e45f67cf78..dd3ded6259c4 100644
--- a/nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix
+++ b/nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix
@@ -13,6 +13,17 @@ in
         The address of the reverse proxy endpoint for oauth2_proxy
       '';
     };
+
+    domain = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        The domain under which oauth2_proxy will be accessible and for which the cookie path is set.
+        This setting must be set to ensure that back-redirects work properly
+        if oauth2-proxy is configured with {option}`services.oauth2_proxy.cookie.domain`
+        or with multiple {option}`services.oauth2_proxy.nginx.virtualHosts` that are not on the same domain.
+      '';
+    };
+
     virtualHosts = mkOption {
       type = types.listOf types.str;
       default = [];
@@ -21,22 +32,26 @@ in
       '';
     };
   };
+
   config.services.oauth2_proxy = mkIf (cfg.virtualHosts != [] && (hasPrefix "127.0.0.1:" cfg.proxy)) {
     enable = true;
   };
-  config.services.nginx = mkIf config.services.oauth2_proxy.enable (mkMerge
-  ((optional (cfg.virtualHosts != []) {
-    recommendedProxySettings = true; # needed because duplicate headers
-  }) ++ (map (vhost: {
-    virtualHosts.${vhost} = {
-      locations."/oauth2/" = {
+
+  config.services.nginx = mkIf (cfg.virtualHosts != [] && config.services.oauth2_proxy.enable) (mkMerge ([
+    {
+      virtualHosts.${cfg.domain}.locations."/oauth2/" = {
         proxyPass = cfg.proxy;
         extraConfig = ''
           proxy_set_header X-Scheme                $scheme;
           proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
         '';
       };
-      locations."/oauth2/auth" = {
+    }
+  ] ++ optional (cfg.virtualHosts != []) {
+    recommendedProxySettings = true; # needed because duplicate headers
+  } ++ (map (vhost: {
+    virtualHosts.${vhost}.locations = {
+      "/oauth2/auth" = {
         proxyPass = cfg.proxy;
         extraConfig = ''
           proxy_set_header X-Scheme         $scheme;
@@ -45,9 +60,10 @@ in
           proxy_pass_request_body           off;
         '';
       };
-      locations."/".extraConfig = ''
+      "@redirectToAuth2ProxyLogin".return = "307 https://${cfg.domain}/oauth2/start?rd=$scheme://$host$request_uri";
+      "/".extraConfig = ''
         auth_request /oauth2/auth;
-        error_page 401 = /oauth2/sign_in;
+        error_page 401 = @redirectToAuth2ProxyLogin;
 
         # pass information via X-User and X-Email headers to backend,
         # requires running with --set-xauthrequest flag
@@ -60,7 +76,6 @@ in
         auth_request_set $auth_cookie $upstream_http_set_cookie;
         add_header Set-Cookie $auth_cookie;
       '';
-
     };
   }) cfg.virtualHosts)));
 }
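With the new `domain` option, a configuration protecting two virtual hosts could be sketched as below; the host names are placeholders, and `domain` is the virtual host that serves the `/oauth2/` endpoints and receives the login redirects:

```nix
{
  services.oauth2_proxy.nginx = {
    domain = "auth.example.com";
    virtualHosts = [
      "app1.example.com"
      "app2.example.com"
    ];
  };
}
```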
diff --git a/nixpkgs/nixos/modules/services/security/usbguard.nix b/nixpkgs/nixos/modules/services/security/usbguard.nix
index f167fbb2eca8..ff54176e13d3 100644
--- a/nixpkgs/nixos/modules/services/security/usbguard.nix
+++ b/nixpkgs/nixos/modules/services/security/usbguard.nix
@@ -80,7 +80,7 @@ in
       };
 
       implicitPolicyTarget = mkOption {
-        type = policy;
+        type = types.enum [ "allow" "block" "reject" ];
         default = "block";
         description = lib.mdDoc ''
           How to treat USB devices that don't match any rule in the policy.
@@ -110,7 +110,7 @@ in
       };
 
       insertedDevicePolicy = mkOption {
-        type = policy;
+        type = types.enum [ "block" "reject" "apply-policy" ];
         default = "apply-policy";
         description = lib.mdDoc ''
           How to treat USB devices that are already connected after the daemon
diff --git a/nixpkgs/nixos/modules/services/video/photonvision.nix b/nixpkgs/nixos/modules/services/video/photonvision.nix
new file mode 100644
index 000000000000..fdbe9da3999d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/video/photonvision.nix
@@ -0,0 +1,64 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.photonvision;
+in
+{
+  options = {
+    services.photonvision = {
+      enable = lib.mkEnableOption (lib.mdDoc "Enable PhotonVision");
+
+      package = lib.mkPackageOption pkgs "photonvision" {};
+
+      openFirewall = lib.mkOption {
+        description = lib.mdDoc ''
+          Whether to open the required ports in the firewall.
+        '';
+        default = false;
+        type = lib.types.bool;
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.photonvision = {
+      description = "PhotonVision, the free, fast, and easy-to-use computer vision solution for the FIRST Robotics Competition";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        ExecStart = lib.getExe cfg.package;
+
+        # ephemeral root directory
+        RuntimeDirectory = "photonvision";
+        RootDirectory = "/run/photonvision";
+
+        # setup persistent state and logs directories
+        StateDirectory = "photonvision";
+        LogsDirectory = "photonvision";
+
+        BindReadOnlyPaths = [
+          # mount the nix store read-only
+          "/nix/store"
+
+          # the JRE reads the user.home property from /etc/passwd
+          "/etc/passwd"
+        ];
+        BindPaths = [
+          # mount the configuration and logs directories to the host
+          "/var/lib/photonvision:/photonvision_config"
+          "/var/log/photonvision:/photonvision_config/logs"
+        ];
+
+        # for PhotonVision's dynamic libraries, which it writes to /tmp
+        PrivateTmp = true;
+      };
+    };
+
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 5800 ];
+      allowedTCPPortRanges = [{ from = 1180; to = 1190; }];
+    };
+  };
+}
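A minimal sketch enabling the new module and opening the ports it declares in the firewall:

```nix
{
  services.photonvision = {
    enable = true;
    openFirewall = true;  # opens TCP 5800 and the 1180-1190 range declared above
  };
}
```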
diff --git a/nixpkgs/nixos/modules/services/web-apps/akkoma.md b/nixpkgs/nixos/modules/services/web-apps/akkoma.md
index 83dd1a8b35f2..13b074b228a4 100644
--- a/nixpkgs/nixos/modules/services/web-apps/akkoma.md
+++ b/nixpkgs/nixos/modules/services/web-apps/akkoma.md
@@ -19,21 +19,23 @@ be run behind a HTTP proxy on `fediverse.example.com`.
 
 
 ```nix
-services.akkoma.enable = true;
-services.akkoma.config = {
-  ":pleroma" = {
-    ":instance" = {
-      name = "My Akkoma instance";
-      description = "More detailed description";
-      email = "admin@example.com";
-      registration_open = false;
-    };
-
-    "Pleroma.Web.Endpoint" = {
-      url.host = "fediverse.example.com";
+{
+  services.akkoma.enable = true;
+  services.akkoma.config = {
+    ":pleroma" = {
+      ":instance" = {
+        name = "My Akkoma instance";
+        description = "More detailed description";
+        email = "admin@example.com";
+        registration_open = false;
+      };
+
+      "Pleroma.Web.Endpoint" = {
+        url.host = "fediverse.example.com";
+      };
     };
   };
-};
+}
 ```
 
 Please refer to the [configuration cheat sheet](https://docs.akkoma.dev/stable/configuration/cheatsheet/)
@@ -55,19 +57,21 @@ Although it is possible to expose Akkoma directly, it is common practice to oper
 HTTP reverse proxy such as nginx.
 
 ```nix
-services.akkoma.nginx = {
-  enableACME = true;
-  forceSSL = true;
-};
-
-services.nginx = {
-  enable = true;
-
-  clientMaxBodySize = "16m";
-  recommendedTlsSettings = true;
-  recommendedOptimisation = true;
-  recommendedGzipSettings = true;
-};
+{
+  services.akkoma.nginx = {
+    enableACME = true;
+    forceSSL = true;
+  };
+
+  services.nginx = {
+    enable = true;
+
+    clientMaxBodySize = "16m";
+    recommendedTlsSettings = true;
+    recommendedOptimisation = true;
+    recommendedGzipSettings = true;
+  };
+}
 ```
 
 Please refer to [](#module-security-acme) for details on how to provision an SSL/TLS certificate.
@@ -78,51 +82,53 @@ Without the media proxy function, Akkoma does not store any remote media like pi
 locally, and clients have to fetch them directly from the source server.
 
 ```nix
-# Enable nginx slice module distributed with Tengine
-services.nginx.package = pkgs.tengine;
-
-# Enable media proxy
-services.akkoma.config.":pleroma".":media_proxy" = {
-  enabled = true;
-  proxy_opts.redirect_on_failure = true;
-};
-
-# Adjust the persistent cache size as needed:
-#  Assuming an average object size of 128 KiB, around 1 MiB
-#  of memory is required for the key zone per GiB of cache.
-# Ensure that the cache directory exists and is writable by nginx.
-services.nginx.commonHttpConfig = ''
-  proxy_cache_path /var/cache/nginx/cache/akkoma-media-cache
-    levels= keys_zone=akkoma_media_cache:16m max_size=16g
-    inactive=1y use_temp_path=off;
-'';
-
-services.akkoma.nginx = {
-  locations."/proxy" = {
-    proxyPass = "http://unix:/run/akkoma/socket";
-
-    extraConfig = ''
-      proxy_cache akkoma_media_cache;
-
-      # Cache objects in slices of 1 MiB
-      slice 1m;
-      proxy_cache_key $host$uri$is_args$args$slice_range;
-      proxy_set_header Range $slice_range;
-
-      # Decouple proxy and upstream responses
-      proxy_buffering on;
-      proxy_cache_lock on;
-      proxy_ignore_client_abort on;
-
-      # Default cache times for various responses
-      proxy_cache_valid 200 1y;
-      proxy_cache_valid 206 301 304 1h;
-
-      # Allow serving of stale items
-      proxy_cache_use_stale error timeout invalid_header updating;
-    '';
+{
+  # Enable nginx slice module distributed with Tengine
+  services.nginx.package = pkgs.tengine;
+
+  # Enable media proxy
+  services.akkoma.config.":pleroma".":media_proxy" = {
+    enabled = true;
+    proxy_opts.redirect_on_failure = true;
   };
-};
+
+  # Adjust the persistent cache size as needed:
+  #  Assuming an average object size of 128 KiB, around 1 MiB
+  #  of memory is required for the key zone per GiB of cache.
+  # Ensure that the cache directory exists and is writable by nginx.
+  services.nginx.commonHttpConfig = ''
+    proxy_cache_path /var/cache/nginx/cache/akkoma-media-cache
+      levels= keys_zone=akkoma_media_cache:16m max_size=16g
+      inactive=1y use_temp_path=off;
+  '';
+
+  services.akkoma.nginx = {
+    locations."/proxy" = {
+      proxyPass = "http://unix:/run/akkoma/socket";
+
+      extraConfig = ''
+        proxy_cache akkoma_media_cache;
+
+        # Cache objects in slices of 1 MiB
+        slice 1m;
+        proxy_cache_key $host$uri$is_args$args$slice_range;
+        proxy_set_header Range $slice_range;
+
+        # Decouple proxy and upstream responses
+        proxy_buffering on;
+        proxy_cache_lock on;
+        proxy_ignore_client_abort on;
+
+        # Default cache times for various responses
+        proxy_cache_valid 200 1y;
+        proxy_cache_valid 206 301 304 1h;
+
+        # Allow serving of stale items
+        proxy_cache_use_stale error timeout invalid_header updating;
+      '';
+    };
+  };
+}
 ```
 
 #### Prefetch remote media {#modules-services-akkoma-prefetch-remote-media}
@@ -132,10 +138,12 @@ fetches all media associated with a post through the media proxy, as soon as the
 received by the instance.
 
 ```nix
-services.akkoma.config.":pleroma".":mrf".policies =
-  map (pkgs.formats.elixirConf { }).lib.mkRaw [
-    "Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy"
-];
+{
+  services.akkoma.config.":pleroma".":mrf".policies =
+    map (pkgs.formats.elixirConf { }).lib.mkRaw [
+      "Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy"
+  ];
+}
 ```
 
 #### Media previews {#modules-services-akkoma-media-previews}
@@ -143,11 +151,13 @@ services.akkoma.config.":pleroma".":mrf".policies =
 Akkoma can generate previews for media.
 
 ```nix
-services.akkoma.config.":pleroma".":media_preview_proxy" = {
-  enabled = true;
-  thumbnail_max_width = 1920;
-  thumbnail_max_height = 1080;
-};
+{
+  services.akkoma.config.":pleroma".":media_preview_proxy" = {
+    enabled = true;
+    thumbnail_max_width = 1920;
+    thumbnail_max_height = 1080;
+  };
+}
 ```
 
 ## Frontend management {#modules-services-akkoma-frontend-management}
@@ -160,29 +170,31 @@ The following example overrides the primary frontend’s default configuration u
 derivation.
 
 ```nix
-services.akkoma.frontends.primary.package = pkgs.runCommand "akkoma-fe" {
-  config = builtins.toJSON {
-    expertLevel = 1;
-    collapseMessageWithSubject = false;
-    stopGifs = false;
-    replyVisibility = "following";
-    webPushHideIfCW = true;
-    hideScopeNotice = true;
-    renderMisskeyMarkdown = false;
-    hideSiteFavicon = true;
-    postContentType = "text/markdown";
-    showNavShortcuts = false;
-  };
-  nativeBuildInputs = with pkgs; [ jq xorg.lndir ];
-  passAsFile = [ "config" ];
-} ''
-  mkdir $out
-  lndir ${pkgs.akkoma-frontends.akkoma-fe} $out
-
-  rm $out/static/config.json
-  jq -s add ${pkgs.akkoma-frontends.akkoma-fe}/static/config.json ${config} \
-    >$out/static/config.json
-'';
+{
+  services.akkoma.frontends.primary.package = pkgs.runCommand "akkoma-fe" {
+    config = builtins.toJSON {
+      expertLevel = 1;
+      collapseMessageWithSubject = false;
+      stopGifs = false;
+      replyVisibility = "following";
+      webPushHideIfCW = true;
+      hideScopeNotice = true;
+      renderMisskeyMarkdown = false;
+      hideSiteFavicon = true;
+      postContentType = "text/markdown";
+      showNavShortcuts = false;
+    };
+    nativeBuildInputs = with pkgs; [ jq xorg.lndir ];
+    passAsFile = [ "config" ];
+  } ''
+    mkdir $out
+    lndir ${pkgs.akkoma-frontends.akkoma-fe} $out
+
+    rm $out/static/config.json
+    jq -s add ${pkgs.akkoma-frontends.akkoma-fe}/static/config.json ${config} \
+      >$out/static/config.json
+  '';
+}
 ```
 
 ## Federation policies {#modules-services-akkoma-federation-policies}
@@ -198,28 +210,30 @@ of the fediverse and providing a pleasant experience to the users of an instance
 
 
 ```nix
-services.akkoma.config.":pleroma" = with (pkgs.formats.elixirConf { }).lib; {
-  ":mrf".policies = map mkRaw [
-    "Pleroma.Web.ActivityPub.MRF.SimplePolicy"
-  ];
-
-  ":mrf_simple" = {
-    # Tag all media as sensitive
-    media_nsfw = mkMap {
-      "nsfw.weird.kinky" = "Untagged NSFW content";
-    };
-
-    # Reject all activities except deletes
-    reject = mkMap {
-      "kiwifarms.cc" = "Persistent harassment of users, no moderation";
-    };
-
-    # Force posts to be visible by followers only
-    followers_only = mkMap {
-      "beta.birdsite.live" = "Avoid polluting timelines with Twitter posts";
+{
+  services.akkoma.config.":pleroma" = with (pkgs.formats.elixirConf { }).lib; {
+    ":mrf".policies = map mkRaw [
+      "Pleroma.Web.ActivityPub.MRF.SimplePolicy"
+    ];
+
+    ":mrf_simple" = {
+      # Tag all media as sensitive
+      media_nsfw = mkMap {
+        "nsfw.weird.kinky" = "Untagged NSFW content";
+      };
+
+      # Reject all activities except deletes
+      reject = mkMap {
+        "kiwifarms.cc" = "Persistent harassment of users, no moderation";
+      };
+
+      # Force posts to be visible by followers only
+      followers_only = mkMap {
+        "beta.birdsite.live" = "Avoid polluting timelines with Twitter posts";
+      };
     };
   };
-};
+}
 ```
 
 ## Upload filters {#modules-services-akkoma-upload-filters}
@@ -228,12 +242,14 @@ This example strips GPS and location metadata from uploads, deduplicates them an
 the file name.
 
 ```nix
-services.akkoma.config.":pleroma"."Pleroma.Upload".filters =
-  map (pkgs.formats.elixirConf { }).lib.mkRaw [
-    "Pleroma.Upload.Filter.Exiftool"
-    "Pleroma.Upload.Filter.Dedupe"
-    "Pleroma.Upload.Filter.AnonymizeFilename"
-  ];
+{
+  services.akkoma.config.":pleroma"."Pleroma.Upload".filters =
+    map (pkgs.formats.elixirConf { }).lib.mkRaw [
+      "Pleroma.Upload.Filter.Exiftool"
+      "Pleroma.Upload.Filter.Dedupe"
+      "Pleroma.Upload.Filter.AnonymizeFilename"
+    ];
+}
 ```
 
 ## Migration from Pleroma {#modules-services-akkoma-migration-pleroma}
@@ -286,9 +302,11 @@ To re‐use the Pleroma data in place, disable Pleroma and enable Akkoma, pointi
 Pleroma database and upload directory.
 
 ```nix
-# Adjust these settings according to the database name and upload directory path used by Pleroma
-services.akkoma.config.":pleroma"."Pleroma.Repo".database = "pleroma";
-services.akkoma.config.":pleroma".":instance".upload_dir = "/var/lib/pleroma/uploads";
+{
+  # Adjust these settings according to the database name and upload directory path used by Pleroma
+  services.akkoma.config.":pleroma"."Pleroma.Repo".database = "pleroma";
+  services.akkoma.config.":pleroma".":instance".upload_dir = "/var/lib/pleroma/uploads";
+}
 ```
 
 Please keep in mind that after the Akkoma service has been started, any migrations applied by
@@ -304,7 +322,9 @@ details.
 The Akkoma systemd service may be confined to a chroot with
 
 ```nix
-services.systemd.akkoma.confinement.enable = true;
+{
+  services.systemd.akkoma.confinement.enable = true;
+}
 ```
 
 Confinement of services is not generally supported in NixOS and therefore disabled by default.
diff --git a/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md b/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md
index 236953bd4ff7..d8e59b3ad210 100644
--- a/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md
+++ b/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md
@@ -4,7 +4,7 @@ c2FmZQ is an application that can securely encrypt, store, and share files,
 including but not limited to pictures and videos.
 
 The service `c2fmzq-server` can be enabled by setting
-```
+```nix
 {
   services.c2fmzq-server.enable = true;
 }
@@ -17,7 +17,7 @@ In principle the server can be exposed directly on a public interface and there
 are command line options to manage HTTPS certificates directly, but the module
 is designed to be served behind a reverse proxy or only accessed via localhost.
 
-```
+```nix
 {
   services.c2fmzq-server = {
     enable = true;
diff --git a/nixpkgs/nixos/modules/services/web-apps/discourse.md b/nixpkgs/nixos/modules/services/web-apps/discourse.md
index 35180bea87d9..d4b9c93c4ead 100644
--- a/nixpkgs/nixos/modules/services/web-apps/discourse.md
+++ b/nixpkgs/nixos/modules/services/web-apps/discourse.md
@@ -6,20 +6,22 @@ modern and open source discussion platform.
 ## Basic usage {#module-services-discourse-basic-usage}
 
 A minimal configuration using Let's Encrypt for TLS certificates looks like this:
-```
-services.discourse = {
-  enable = true;
-  hostname = "discourse.example.com";
-  admin = {
-    email = "admin@example.com";
-    username = "admin";
-    fullName = "Administrator";
-    passwordFile = "/path/to/password_file";
+```nix
+{
+  services.discourse = {
+    enable = true;
+    hostname = "discourse.example.com";
+    admin = {
+      email = "admin@example.com";
+      username = "admin";
+      fullName = "Administrator";
+      passwordFile = "/path/to/password_file";
+    };
+    secretKeyBaseFile = "/path/to/secret_key_base_file";
   };
-  secretKeyBaseFile = "/path/to/secret_key_base_file";
-};
-security.acme.email = "me@example.com";
-security.acme.acceptTerms = true;
+  security.acme.email = "me@example.com";
+  security.acme.acceptTerms = true;
+}
 ```
 
 Provided a proper DNS setup, you'll be able to connect to the
@@ -34,20 +36,22 @@ the [](#opt-services.discourse.sslCertificate)
 and [](#opt-services.discourse.sslCertificateKey)
 options:
 
-```
-services.discourse = {
-  enable = true;
-  hostname = "discourse.example.com";
-  sslCertificate = "/path/to/ssl_certificate";
-  sslCertificateKey = "/path/to/ssl_certificate_key";
-  admin = {
-    email = "admin@example.com";
-    username = "admin";
-    fullName = "Administrator";
-    passwordFile = "/path/to/password_file";
+```nix
+{
+  services.discourse = {
+    enable = true;
+    hostname = "discourse.example.com";
+    sslCertificate = "/path/to/ssl_certificate";
+    sslCertificateKey = "/path/to/ssl_certificate_key";
+    admin = {
+      email = "admin@example.com";
+      username = "admin";
+      fullName = "Administrator";
+      passwordFile = "/path/to/password_file";
+    };
+    secretKeyBaseFile = "/path/to/secret_key_base_file";
   };
-  secretKeyBaseFile = "/path/to/secret_key_base_file";
-};
+}
 ```
 
 ## Database access {#module-services-discourse-database}
@@ -80,27 +84,29 @@ A basic setup which assumes you want to use your configured
 [hostname](#opt-services.discourse.hostname) as
 email domain can be done like this:
 
-```
-services.discourse = {
-  enable = true;
-  hostname = "discourse.example.com";
-  sslCertificate = "/path/to/ssl_certificate";
-  sslCertificateKey = "/path/to/ssl_certificate_key";
-  admin = {
-    email = "admin@example.com";
-    username = "admin";
-    fullName = "Administrator";
-    passwordFile = "/path/to/password_file";
-  };
-  mail.outgoing = {
-    serverAddress = "smtp.emailprovider.com";
-    port = 587;
-    username = "user@emailprovider.com";
-    passwordFile = "/path/to/smtp_password_file";
+```nix
+{
+  services.discourse = {
+    enable = true;
+    hostname = "discourse.example.com";
+    sslCertificate = "/path/to/ssl_certificate";
+    sslCertificateKey = "/path/to/ssl_certificate_key";
+    admin = {
+      email = "admin@example.com";
+      username = "admin";
+      fullName = "Administrator";
+      passwordFile = "/path/to/password_file";
+    };
+    mail.outgoing = {
+      serverAddress = "smtp.emailprovider.com";
+      port = 587;
+      username = "user@emailprovider.com";
+      passwordFile = "/path/to/smtp_password_file";
+    };
+    mail.incoming.enable = true;
+    secretKeyBaseFile = "/path/to/secret_key_base_file";
   };
-  mail.incoming.enable = true;
-  secretKeyBaseFile = "/path/to/secret_key_base_file";
-};
+}
 ```
 
 This assumes you have set up an MX record for the address you've
@@ -162,44 +168,46 @@ The following example sets the title and description of the
 Discourse instance and enables
 GitHub login in the site settings,
 and changes a few request limits in the backend settings:
-```
-services.discourse = {
-  enable = true;
-  hostname = "discourse.example.com";
-  sslCertificate = "/path/to/ssl_certificate";
-  sslCertificateKey = "/path/to/ssl_certificate_key";
-  admin = {
-    email = "admin@example.com";
-    username = "admin";
-    fullName = "Administrator";
-    passwordFile = "/path/to/password_file";
-  };
-  mail.outgoing = {
-    serverAddress = "smtp.emailprovider.com";
-    port = 587;
-    username = "user@emailprovider.com";
-    passwordFile = "/path/to/smtp_password_file";
-  };
-  mail.incoming.enable = true;
-  siteSettings = {
-    required = {
-      title = "My Cats";
-      site_description = "Discuss My Cats (and be nice plz)";
+```nix
+{
+  services.discourse = {
+    enable = true;
+    hostname = "discourse.example.com";
+    sslCertificate = "/path/to/ssl_certificate";
+    sslCertificateKey = "/path/to/ssl_certificate_key";
+    admin = {
+      email = "admin@example.com";
+      username = "admin";
+      fullName = "Administrator";
+      passwordFile = "/path/to/password_file";
     };
-    login = {
-      enable_github_logins = true;
-      github_client_id = "a2f6dfe838cb3206ce20";
-      github_client_secret._secret = /run/keys/discourse_github_client_secret;
+    mail.outgoing = {
+      serverAddress = "smtp.emailprovider.com";
+      port = 587;
+      username = "user@emailprovider.com";
+      passwordFile = "/path/to/smtp_password_file";
     };
+    mail.incoming.enable = true;
+    siteSettings = {
+      required = {
+        title = "My Cats";
+        site_description = "Discuss My Cats (and be nice plz)";
+      };
+      login = {
+        enable_github_logins = true;
+        github_client_id = "a2f6dfe838cb3206ce20";
+        github_client_secret._secret = /run/keys/discourse_github_client_secret;
+      };
+    };
+    backendSettings = {
+      max_reqs_per_ip_per_minute = 300;
+      max_reqs_per_ip_per_10_seconds = 60;
+      max_asset_reqs_per_ip_per_10_seconds = 250;
+      max_reqs_per_ip_mode = "warn+block";
+    };
+    secretKeyBaseFile = "/path/to/secret_key_base_file";
   };
-  backendSettings = {
-    max_reqs_per_ip_per_minute = 300;
-    max_reqs_per_ip_per_10_seconds = 60;
-    max_asset_reqs_per_ip_per_10_seconds = 250;
-    max_reqs_per_ip_mode = "warn+block";
-  };
-  secretKeyBaseFile = "/path/to/secret_key_base_file";
-};
+}
 ```
 
 In the resulting site settings file, the
@@ -253,34 +261,36 @@ and [discourse-solved](https://github.com/discourse/discourse-solved)
 plugins, and disable `discourse-spoiler-alert`
 by default:
 
-```
-services.discourse = {
-  enable = true;
-  hostname = "discourse.example.com";
-  sslCertificate = "/path/to/ssl_certificate";
-  sslCertificateKey = "/path/to/ssl_certificate_key";
-  admin = {
-    email = "admin@example.com";
-    username = "admin";
-    fullName = "Administrator";
-    passwordFile = "/path/to/password_file";
-  };
-  mail.outgoing = {
-    serverAddress = "smtp.emailprovider.com";
-    port = 587;
-    username = "user@emailprovider.com";
-    passwordFile = "/path/to/smtp_password_file";
-  };
-  mail.incoming.enable = true;
-  plugins = with config.services.discourse.package.plugins; [
-    discourse-spoiler-alert
-    discourse-solved
-  ];
-  siteSettings = {
-    plugins = {
-      spoiler_enabled = false;
+```nix
+{
+  services.discourse = {
+    enable = true;
+    hostname = "discourse.example.com";
+    sslCertificate = "/path/to/ssl_certificate";
+    sslCertificateKey = "/path/to/ssl_certificate_key";
+    admin = {
+      email = "admin@example.com";
+      username = "admin";
+      fullName = "Administrator";
+      passwordFile = "/path/to/password_file";
+    };
+    mail.outgoing = {
+      serverAddress = "smtp.emailprovider.com";
+      port = 587;
+      username = "user@emailprovider.com";
+      passwordFile = "/path/to/smtp_password_file";
+    };
+    mail.incoming.enable = true;
+    plugins = with config.services.discourse.package.plugins; [
+      discourse-spoiler-alert
+      discourse-solved
+    ];
+    siteSettings = {
+      plugins = {
+        spoiler_enabled = false;
+      };
     };
+    secretKeyBaseFile = "/path/to/secret_key_base_file";
   };
-  secretKeyBaseFile = "/path/to/secret_key_base_file";
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/web-apps/gotosocial.md b/nixpkgs/nixos/modules/services/web-apps/gotosocial.md
index a290d7d1893a..b3540f0d5811 100644
--- a/nixpkgs/nixos/modules/services/web-apps/gotosocial.md
+++ b/nixpkgs/nixos/modules/services/web-apps/gotosocial.md
@@ -8,17 +8,19 @@ The following configuration sets up the PostgreSQL as database backend and binds
 GoToSocial to `127.0.0.1:8080`, expecting to be run behind an HTTP proxy on `gotosocial.example.com`.
 
 ```nix
-services.gotosocial = {
-  enable = true;
-  setupPostgresqlDB = true;
-  settings = {
-    application-name = "My GoToSocial";
-    host = "gotosocial.example.com";
-    protocol = "https";
-    bind-address = "127.0.0.1";
-    port = 8080;
+{
+  services.gotosocial = {
+    enable = true;
+    setupPostgresqlDB = true;
+    settings = {
+      application-name = "My GoToSocial";
+      host = "gotosocial.example.com";
+      protocol = "https";
+      bind-address = "127.0.0.1";
+      port = 8080;
+    };
   };
-};
+}
 ```
 
 Please refer to the [GoToSocial Documentation](https://docs.gotosocial.org/en/latest/configuration/general/)
@@ -30,24 +32,26 @@ Although it is possible to expose GoToSocial directly, it is common practice to
 HTTP reverse proxy such as nginx.
 
 ```nix
-networking.firewall.allowedTCPPorts = [ 80 443 ];
-services.nginx = {
-  enable = true;
-  clientMaxBodySize = "40M";
-  virtualHosts = with config.services.gotosocial.settings; {
-    "${host}" = {
-      enableACME = true;
-      forceSSL = true;
-      locations = {
-        "/" = {
-          recommendedProxySettings = true;
-          proxyWebsockets = true;
-          proxyPass = "http://${bind-address}:${toString port}";
+{
+  networking.firewall.allowedTCPPorts = [ 80 443 ];
+  services.nginx = {
+    enable = true;
+    clientMaxBodySize = "40M";
+    virtualHosts = with config.services.gotosocial.settings; {
+      "${host}" = {
+        enableACME = true;
+        forceSSL = true;
+        locations = {
+          "/" = {
+            recommendedProxySettings = true;
+            proxyWebsockets = true;
+            proxyPass = "http://${bind-address}:${toString port}";
+          };
         };
       };
     };
   };
-};
+}
 ```
 
 Please refer to [](#module-security-acme) for details on how to provision an SSL/TLS certificate.
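
For a host with `enableACME = true` as above, provisioning usually only requires accepting the CA's terms and setting a contact address; a minimal sketch (the e-mail address here is a placeholder):

```nix
{
  # Accept the Let's Encrypt terms of service and set a contact address.
  security.acme.acceptTerms = true;
  security.acme.defaults.email = "admin@example.com";
}
```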
diff --git a/nixpkgs/nixos/modules/services/web-apps/grocy.md b/nixpkgs/nixos/modules/services/web-apps/grocy.md
index 62aad4b103df..f4b5769c2479 100644
--- a/nixpkgs/nixos/modules/services/web-apps/grocy.md
+++ b/nixpkgs/nixos/modules/services/web-apps/grocy.md
@@ -6,7 +6,7 @@
 ## Basic usage {#module-services-grocy-basic-usage}
 
 A very basic configuration may look like this:
-```
+```nix
 { pkgs, ... }:
 {
   services.grocy = {
@@ -29,7 +29,7 @@ of the application.
 
 The configuration for `grocy` is located at `/etc/grocy/config.php`.
 By default, the following settings can be defined in the NixOS-configuration:
-```
+```nix
 { pkgs, ... }:
 {
   services.grocy.settings = {
@@ -56,7 +56,7 @@ By default, the following settings can be defined in the NixOS-configuration:
 
 If you want to alter the configuration file on your own, you can do this manually with
 an expression like this:
-```
+```nix
 { lib, ... }:
 {
   environment.etc."grocy/config.php".text = lib.mkAfter ''
diff --git a/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md b/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md
index 060ef9752650..577f82e315be 100644
--- a/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md
+++ b/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md
@@ -6,7 +6,7 @@ private, self-hosted video conferencing solution.
 ## Basic usage {#module-services-jitsi-basic-usage}
 
 A minimal configuration using Let's Encrypt for TLS certificates looks like this:
-```
+```nix
 {
   services.jitsi-meet = {
     enable = true;
@@ -22,7 +22,7 @@ A minimal configuration using Let's Encrypt for TLS certificates looks like this
 ## Configuration {#module-services-jitsi-configuration}
 
 Here is the minimal configuration with additional configurations:
-```
+```nix
 {
   services.jitsi-meet = {
     enable = true;
diff --git a/nixpkgs/nixos/modules/services/web-apps/kavita.nix b/nixpkgs/nixos/modules/services/web-apps/kavita.nix
index c3e39f0b5476..c90697bcfa8b 100644
--- a/nixpkgs/nixos/modules/services/web-apps/kavita.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/kavita.nix
@@ -2,7 +2,18 @@
 
 let
   cfg = config.services.kavita;
-in {
+  settingsFormat = pkgs.formats.json { };
+  appsettings = settingsFormat.generate "appsettings.json" ({ TokenKey = "@TOKEN@"; } // cfg.settings);
+in
+{
+  imports = [
+    (lib.mkChangedOptionModule [ "services" "kavita" "ipAdresses" ] [ "services" "kavita" "settings" "IpAddresses" ] (config:
+      let value = lib.getAttrFromPath [ "services" "kavita" "ipAdresses" ] config; in
+      lib.concatStringsSep "," value
+    ))
+    (lib.mkRenamedOptionModule [ "services" "kavita" "port" ] [ "services" "kavita" "settings" "Port" ])
+  ];
+
   options.services.kavita = {
     enable = lib.mkEnableOption (lib.mdDoc "Kavita reading server");
 
@@ -27,16 +38,31 @@ in {
         It can be generated with `head -c 32 /dev/urandom | base64`.
       '';
     };
-    port = lib.mkOption {
-      default = 5000;
-      type = lib.types.port;
-      description = lib.mdDoc "Port to bind to.";
-    };
-    ipAdresses = lib.mkOption {
-      default = ["0.0.0.0" "::"];
-      type = lib.types.listOf lib.types.str;
-      description = lib.mdDoc "IP Addresses to bind to. The default is to bind
-      to all IPv4 and IPv6 addresses.";
+
+    settings = lib.mkOption {
+      default = { };
+      description = lib.mdDoc ''
+        Kavita configuration options, as configured in {file}`appsettings.json`.
+      '';
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+
+        options = {
+          Port = lib.mkOption {
+            default = 5000;
+            type = lib.types.port;
+            description = lib.mdDoc "Port to bind to.";
+          };
+
+          IpAddresses = lib.mkOption {
+            default = "0.0.0.0,::";
+            type = lib.types.commas;
+            description = lib.mdDoc ''
+              IP Addresses to bind to. The default is to bind to all IPv4 and IPv6 addresses.
+            '';
+          };
+        };
+      };
     };
   };
 
@@ -46,18 +72,15 @@ in {
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
       preStart = ''
-        umask u=rwx,g=rx,o=
-        cat > "${cfg.dataDir}/config/appsettings.json" <<EOF
-        {
-          "TokenKey": "$(cat ${cfg.tokenKeyFile})",
-          "Port": ${toString cfg.port},
-          "IpAddresses": "${lib.concatStringsSep "," cfg.ipAdresses}"
-        }
-        EOF
+        install -m600 ${appsettings} ${lib.escapeShellArg cfg.dataDir}/config/appsettings.json
+        ${pkgs.replace-secret}/bin/replace-secret '@TOKEN@' \
+          ''${CREDENTIALS_DIRECTORY}/token \
+          '${cfg.dataDir}/config/appsettings.json'
       '';
       serviceConfig = {
         WorkingDirectory = cfg.dataDir;
-        ExecStart = "${lib.getExe cfg.package}";
+        LoadCredential = [ "token:${cfg.tokenKeyFile}" ];
+        ExecStart = lib.getExe cfg.package;
         Restart = "always";
         User = cfg.user;
       };
diff --git a/nixpkgs/nixos/modules/services/web-apps/keycloak.md b/nixpkgs/nixos/modules/services/web-apps/keycloak.md
index aa8de40d642b..020bee400348 100644
--- a/nixpkgs/nixos/modules/services/web-apps/keycloak.md
+++ b/nixpkgs/nixos/modules/services/web-apps/keycloak.md
@@ -126,16 +126,18 @@ should be set to. See the description of
 ## Example configuration {#module-services-keycloak-example-config}
 
 A basic configuration with some custom settings could look like this:
-```
-services.keycloak = {
-  enable = true;
-  settings = {
-    hostname = "keycloak.example.com";
-    hostname-strict-backchannel = true;
+```nix
+{
+  services.keycloak = {
+    enable = true;
+    settings = {
+      hostname = "keycloak.example.com";
+      hostname-strict-backchannel = true;
+    };
+    initialAdminPassword = "e6Wcm0RrtegMEHl";  # change on first login
+    sslCertificate = "/run/keys/ssl_cert";
+    sslCertificateKey = "/run/keys/ssl_key";
+    database.passwordFile = "/run/keys/db_password";
   };
-  initialAdminPassword = "e6Wcm0RrtegMEHl";  # change on first login
-  sslCertificate = "/run/keys/ssl_cert";
-  sslCertificateKey = "/run/keys/ssl_key";
-  database.passwordFile = "/run/keys/db_password";
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/web-apps/lemmy.md b/nixpkgs/nixos/modules/services/web-apps/lemmy.md
index faafe096d138..0ed23607d00b 100644
--- a/nixpkgs/nixos/modules/services/web-apps/lemmy.md
+++ b/nixpkgs/nixos/modules/services/web-apps/lemmy.md
 Lemmy is a federated alternative to Reddit, written in Rust.
 the minimum to start lemmy is
 
 ```nix
-services.lemmy = {
-  enable = true;
-  settings = {
-    hostname = "lemmy.union.rocks";
-    database.createLocally = true;
+{
+  services.lemmy = {
+    enable = true;
+    settings = {
+      hostname = "lemmy.union.rocks";
+      database.createLocally = true;
+    };
+    caddy.enable = true;
   };
-  caddy.enable = true;
 }
 ```
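
With `caddy.enable = true`, Caddy terminates TLS on this host, so you will typically also want to open the HTTP and HTTPS ports; a minimal sketch:

```nix
{
  # Allow inbound traffic for the Caddy reverse proxy.
  networking.firewall.allowedTCPPorts = [ 80 443 ];
}
```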
 
diff --git a/nixpkgs/nixos/modules/services/web-apps/nextcloud.md b/nixpkgs/nixos/modules/services/web-apps/nextcloud.md
index 5db83d7e4463..06a8712b0b8a 100644
--- a/nixpkgs/nixos/modules/services/web-apps/nextcloud.md
+++ b/nixpkgs/nixos/modules/services/web-apps/nextcloud.md
@@ -25,7 +25,7 @@ to `true`, Nextcloud will automatically be configured to connect to it through
 socket.
 
 A very basic configuration may look like this:
-```
+```nix
 { pkgs, ... }:
 {
   services.nextcloud = {
@@ -130,7 +130,7 @@ settings `listen.owner` &amp; `listen.group` in the
 [corresponding `phpfpm` pool](#opt-services.phpfpm.pools).
 
 An exemplary configuration may look like this:
-```
+```nix
 { config, lib, pkgs, ... }: {
   services.nginx.enable = false;
   services.nextcloud = {
@@ -205,7 +205,7 @@ If major-releases will be abandoned by upstream, we should check first if those
 in NixOS for a safe upgrade-path before removing those. In that case we should keep those
 packages, but mark them as insecure in an expression like this (in
 `<nixpkgs/pkgs/servers/nextcloud/default.nix>`):
-```
+```nix
 /* ... */
 {
   nextcloud17 = generic {
diff --git a/nixpkgs/nixos/modules/services/web-apps/peertube.nix b/nixpkgs/nixos/modules/services/web-apps/peertube.nix
index 39c02c81c423..76f869913592 100644
--- a/nixpkgs/nixos/modules/services/web-apps/peertube.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/peertube.nix
@@ -61,18 +61,16 @@ let
     eval -- "\$@"
   '';
 
-  peertubeCli = pkgs.writeShellScriptBin "peertube" ''
-    node ~/dist/server/tools/peertube.js $@
+  nginxCommonHeaders = lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.forceSSL ''
+    add_header Strict-Transport-Security 'max-age=31536000';
+  '' + lib.optionalString (config.services.nginx.virtualHosts.${cfg.localDomain}.quic && config.services.nginx.virtualHosts.${cfg.localDomain}.http3) ''
+    add_header Alt-Svc 'h3=":$server_port"; ma=604800';
   '';
 
-  nginxCommonHeaders = lib.optionalString cfg.enableWebHttps ''
-    add_header Strict-Transport-Security      'max-age=63072000; includeSubDomains';
-  '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
-    add_header Alt-Svc                        'h3=":443"; ma=86400';
-  '' + ''
-    add_header Access-Control-Allow-Origin    '*';
-    add_header Access-Control-Allow-Methods   'GET, OPTIONS';
-    add_header Access-Control-Allow-Headers   'Range,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
+  nginxCommonHeadersExtra = ''
+    add_header Access-Control-Allow-Origin '*';
+    add_header Access-Control-Allow-Methods 'GET, OPTIONS';
+    add_header Access-Control-Allow-Headers 'Range,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
   '';
 
 in {
@@ -330,6 +328,8 @@ in {
       }
     ];
 
+    environment.systemPackages = [ cfg.package.cli ];
+
     services.peertube.settings = lib.mkMerge [
       {
         listen = {
@@ -355,12 +355,13 @@ in {
           tmp_persistent = lib.mkDefault "/var/lib/peertube/storage/tmp_persistent/";
           bin = lib.mkDefault "/var/lib/peertube/storage/bin/";
           avatars = lib.mkDefault "/var/lib/peertube/storage/avatars/";
-          videos = lib.mkDefault "/var/lib/peertube/storage/videos/";
+          web_videos = lib.mkDefault "/var/lib/peertube/storage/web-videos/";
           streaming_playlists = lib.mkDefault "/var/lib/peertube/storage/streaming-playlists/";
           redundancy = lib.mkDefault "/var/lib/peertube/storage/redundancy/";
           logs = lib.mkDefault "/var/lib/peertube/storage/logs/";
           previews = lib.mkDefault "/var/lib/peertube/storage/previews/";
           thumbnails = lib.mkDefault "/var/lib/peertube/storage/thumbnails/";
+          storyboards = lib.mkDefault "/var/lib/peertube/storage/storyboards/";
           torrents = lib.mkDefault "/var/lib/peertube/storage/torrents/";
           captions = lib.mkDefault "/var/lib/peertube/storage/captions/";
           cache = lib.mkDefault "/var/lib/peertube/storage/cache/";
@@ -428,7 +429,7 @@ in {
 
       environment = env;
 
-      path = with pkgs; [ bashInteractive ffmpeg nodejs_18 openssl yarn python3 ];
+      path = with pkgs; [ nodejs_18 yarn ffmpeg-headless openssl ];
 
       script = ''
         #!/bin/sh
@@ -456,7 +457,7 @@ in {
         ln -sf ${cfg.package}/config/default.yaml /var/lib/peertube/config/default.yaml
         ln -sf ${cfg.package}/client/dist -T /var/lib/peertube/www/client
         ln -sf ${cfg.settings.storage.client_overrides} -T /var/lib/peertube/www/client-overrides
-        npm start
+        node dist/server
       '';
       serviceConfig = {
         Type = "simple";
@@ -488,6 +489,9 @@ in {
 
     services.nginx = lib.mkIf cfg.configureNginx {
       enable = true;
+      upstreams."peertube".servers = {
+        "127.0.0.1:${toString cfg.listenHttp}".fail_timeout = "0";
+      };
       virtualHosts."${cfg.localDomain}" = {
         root = "/var/lib/peertube/www";
 
@@ -497,14 +501,14 @@ in {
           priority = 1110;
         };
 
-        locations."= /api/v1/videos/upload-resumable" = {
+        locations."~ ^/api/v1/videos/(upload-resumable|([^/]+/source/replace-resumable))$" = {
           tryFiles = "/dev/null @api";
           priority = 1120;
 
           extraConfig = ''
-            client_max_body_size                        0;
-            proxy_request_buffering                     off;
-          '';
+            client_max_body_size 0;
+            proxy_request_buffering off;
+          '' + nginxCommonHeaders;
         };
 
         locations."~ ^/api/v1/videos/(upload|([^/]+/studio/edit))$" = {
@@ -513,13 +517,11 @@ in {
           priority = 1130;
 
           extraConfig = ''
-            client_max_body_size                        12G;
-            add_header X-File-Maximum-Size              8G always;
-          '' + lib.optionalString cfg.enableWebHttps ''
-            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
-          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
-            add_header Alt-Svc                          'h3=":443"; ma=86400';
-          '';
+            limit_except POST HEAD { deny all; }
+
+            client_max_body_size 12G;
+            add_header X-File-Maximum-Size 8G always;
+          '' + nginxCommonHeaders;
         };
 
         locations."~ ^/api/v1/runners/jobs/[^/]+/(update|success)$" = {
@@ -528,13 +530,9 @@ in {
           priority = 1135;
 
           extraConfig = ''
-            client_max_body_size                        12G;
-            add_header X-File-Maximum-Size              8G always;
-          '' + lib.optionalString cfg.enableWebHttps ''
-            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
-          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
-            add_header Alt-Svc                          'h3=":443"; ma=86400';
-          '';
+            client_max_body_size 12G;
+            add_header X-File-Maximum-Size 8G always;
+          '' + nginxCommonHeaders;
         };
 
         locations."~ ^/api/v1/(videos|video-playlists|video-channels|users/me)" = {
@@ -542,32 +540,28 @@ in {
           priority = 1140;
 
           extraConfig = ''
-            client_max_body_size                        6M;
-            add_header X-File-Maximum-Size              4M always;
-          '' + lib.optionalString cfg.enableWebHttps ''
-            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
-          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
-            add_header Alt-Svc                          'h3=":443"; ma=86400';
-          '';
+            client_max_body_size 6M;
+            add_header X-File-Maximum-Size 4M always;
+          '' + nginxCommonHeaders;
         };
 
         locations."@api" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          proxyPass = "http://peertube";
           priority = 1150;
 
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
-            proxy_connect_timeout                       10m;
+            proxy_connect_timeout 10m;
 
-            proxy_send_timeout                          10m;
-            proxy_read_timeout                          10m;
+            proxy_send_timeout 10m;
+            proxy_read_timeout 10m;
 
-            client_max_body_size                        100k;
-            send_timeout                                10m;
-          '';
+            client_max_body_size 100k;
+            send_timeout 10m;
+          '' + nginxCommonHeaders;
         };
 
         # Websocket
@@ -581,7 +575,7 @@ in {
           priority = 1220;
 
           extraConfig = ''
-            proxy_read_timeout                          15m;
+            proxy_read_timeout 15m;
           '';
         };
 
@@ -591,84 +585,82 @@ in {
         };
 
         locations."@api_websocket" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          proxyPass = "http://peertube";
           priority = 1240;
 
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
-            proxy_set_header Upgrade                    $http_upgrade;
-            proxy_set_header Connection                 'upgrade';
-
-            proxy_http_version                          1.1;
-          '';
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection 'upgrade';
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
+          '' + nginxCommonHeaders;
         };
 
         # Bypass PeerTube for performance reasons.
         locations."~ ^/client/(assets/images/(icons/icon-36x36\.png|icons/icon-48x48\.png|icons/icon-72x72\.png|icons/icon-96x96\.png|icons/icon-144x144\.png|icons/icon-192x192\.png|icons/icon-512x512\.png|logo\.svg|favicon\.png|default-playlist\.jpg|default-avatar-account\.png|default-avatar-account-48x48\.png|default-avatar-video-channel\.png|default-avatar-video-channel-48x48\.png))$" = {
           tryFiles = "/client-overrides/$1 /client/$1 $1";
           priority = 1310;
+
+          extraConfig = nginxCommonHeaders;
         };
 
         locations."~ ^/client/(.*\.(js|css|png|svg|woff2|otf|ttf|woff|eot))$" = {
           alias = "${cfg.package}/client/dist/$1";
           priority = 1320;
           extraConfig = ''
-            add_header Cache-Control                    'public, max-age=604800, immutable';
-          '' + lib.optionalString cfg.enableWebHttps ''
-            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
-          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
-            add_header Alt-Svc                          'h3=":443"; ma=86400';
-          '';
+            add_header Cache-Control 'public, max-age=604800, immutable';
+          '' + nginxCommonHeaders;
         };
 
         locations."^~ /download/" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          proxyPass = "http://peertube";
           priority = 1410;
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
-            proxy_limit_rate                            5M;
-          '';
+            proxy_limit_rate 5M;
+          '' + nginxCommonHeaders;
         };
 
-        locations."^~ /static/streaming-playlists/private/" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+        locations."^~ /static/streaming-playlists/hls/private/" = {
+          proxyPass = "http://peertube";
           priority = 1420;
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
-            proxy_limit_rate                            5M;
-          '';
+            proxy_limit_rate 5M;
+          '' + nginxCommonHeaders;
         };
 
         locations."^~ /static/web-videos/private/" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          proxyPass = "http://peertube";
           priority = 1430;
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
-            proxy_limit_rate                            5M;
-          '';
+            proxy_limit_rate 5M;
+          '' + nginxCommonHeaders;
         };
 
         locations."^~ /static/webseed/private/" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          proxyPass = "http://peertube";
           priority = 1440;
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
-            proxy_limit_rate                            5M;
-          '';
+            proxy_limit_rate 5M;
+          '' + nginxCommonHeaders;
         };
 
         locations."^~ /static/redundancy/" = {
@@ -676,33 +668,35 @@ in {
           root = cfg.settings.storage.redundancy;
           priority = 1450;
           extraConfig = ''
-            set $peertube_limit_rate                    800k;
+            set $peertube_limit_rate 800k;
 
             if ($request_uri ~ -fragmented.mp4$) {
-              set $peertube_limit_rate                  5M;
+              set $peertube_limit_rate 5M;
             }
 
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
-              add_header Access-Control-Max-Age         1728000;
-              add_header Content-Type                   'text/plain charset=UTF-8';
-              add_header Content-Length                 0;
-              return                                    204;
+              ${nginxCommonHeadersExtra}
+              add_header Access-Control-Max-Age 1728000;
+              add_header Content-Type 'text/plain charset=UTF-8';
+              add_header Content-Length 0;
+              return 204;
             }
             if ($request_method = 'GET') {
               ${nginxCommonHeaders}
+              ${nginxCommonHeadersExtra}
 
-              access_log                                off;
+              access_log off;
             }
 
-            aio                                         threads;
-            sendfile                                    on;
-            sendfile_max_chunk                          1M;
+            aio threads;
+            sendfile on;
+            sendfile_max_chunk 1M;
 
-            limit_rate                                  $peertube_limit_rate;
-            limit_rate_after                            5M;
+            limit_rate $peertube_limit_rate;
+            limit_rate_after 5M;
 
-            rewrite ^/static/redundancy/(.*)$           /$1 break;
+            rewrite ^/static/redundancy/(.*)$ /$1 break;
           '';
         };
 
@@ -711,109 +705,111 @@ in {
           root = cfg.settings.storage.streaming_playlists;
           priority = 1460;
           extraConfig = ''
-            set $peertube_limit_rate                    800k;
+            set $peertube_limit_rate 800k;
 
             if ($request_uri ~ -fragmented.mp4$) {
-              set $peertube_limit_rate                  5M;
+              set $peertube_limit_rate 5M;
             }
 
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
-              add_header Access-Control-Max-Age         1728000;
-              add_header Content-Type                   'text/plain charset=UTF-8';
-              add_header Content-Length                 0;
-              return                                    204;
+              ${nginxCommonHeadersExtra}
+              add_header Access-Control-Max-Age 1728000;
+              add_header Content-Type 'text/plain charset=UTF-8';
+              add_header Content-Length 0;
+              return 204;
             }
             if ($request_method = 'GET') {
               ${nginxCommonHeaders}
+              ${nginxCommonHeadersExtra}
 
-              access_log                                off;
+              access_log off;
             }
 
-            aio                                         threads;
-            sendfile                                    on;
-            sendfile_max_chunk                          1M;
+            aio threads;
+            sendfile on;
+            sendfile_max_chunk 1M;
 
-            limit_rate                                  $peertube_limit_rate;
-            limit_rate_after                            5M;
+            limit_rate $peertube_limit_rate;
+            limit_rate_after 5M;
 
-            rewrite ^/static/streaming-playlists/(.*)$  /$1 break;
+            rewrite ^/static/streaming-playlists/(.*)$ /$1 break;
           '';
         };
 
         locations."^~ /static/web-videos/" = {
           tryFiles = "$uri @api";
-          root = cfg.settings.storage.streaming_playlists;
+          root = cfg.settings.storage.web_videos;
           priority = 1470;
           extraConfig = ''
-            set $peertube_limit_rate                    800k;
+            set $peertube_limit_rate 800k;
 
             if ($request_uri ~ -fragmented.mp4$) {
-              set $peertube_limit_rate                  5M;
+              set $peertube_limit_rate 5M;
             }
 
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
-              add_header Access-Control-Max-Age         1728000;
-              add_header Content-Type                   'text/plain charset=UTF-8';
-              add_header Content-Length                 0;
-              return                                    204;
+              ${nginxCommonHeadersExtra}
+              add_header Access-Control-Max-Age 1728000;
+              add_header Content-Type 'text/plain charset=UTF-8';
+              add_header Content-Length 0;
+              return 204;
             }
             if ($request_method = 'GET') {
               ${nginxCommonHeaders}
+              ${nginxCommonHeadersExtra}
 
-              access_log                                off;
+              access_log off;
             }
 
-            aio                                         threads;
-            sendfile                                    on;
-            sendfile_max_chunk                          1M;
+            aio threads;
+            sendfile on;
+            sendfile_max_chunk 1M;
 
-            limit_rate                                  $peertube_limit_rate;
-            limit_rate_after                            5M;
+            limit_rate $peertube_limit_rate;
+            limit_rate_after 5M;
 
-            rewrite ^/static/streaming-playlists/(.*)$  /$1 break;
+            rewrite ^/static/web-videos/(.*)$ /$1 break;
           '';
         };
 
         locations."^~ /static/webseed/" = {
           tryFiles = "$uri @api";
-          root = cfg.settings.storage.videos;
+          root = cfg.settings.storage.web_videos;
           priority = 1480;
           extraConfig = ''
-            set $peertube_limit_rate                    800k;
+            set $peertube_limit_rate 800k;
 
             if ($request_uri ~ -fragmented.mp4$) {
-              set $peertube_limit_rate                  5M;
+              set $peertube_limit_rate 5M;
             }
 
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
-              add_header Access-Control-Max-Age         1728000;
-              add_header Content-Type                   'text/plain charset=UTF-8';
-              add_header Content-Length                 0;
-              return                                    204;
+              ${nginxCommonHeadersExtra}
+              add_header Access-Control-Max-Age 1728000;
+              add_header Content-Type 'text/plain charset=UTF-8';
+              add_header Content-Length 0;
+              return 204;
             }
             if ($request_method = 'GET') {
               ${nginxCommonHeaders}
+              ${nginxCommonHeadersExtra}
 
-              access_log                                off;
+              access_log off;
             }
 
-            aio                                         threads;
-            sendfile                                    on;
-            sendfile_max_chunk                          1M;
+            aio threads;
+            sendfile on;
+            sendfile_max_chunk 1M;
 
-            limit_rate                                  $peertube_limit_rate;
-            limit_rate_after                            5M;
+            limit_rate $peertube_limit_rate;
+            limit_rate_after 5M;
 
-            rewrite ^/static/webseed/(.*)$              /$1 break;
+            rewrite ^/static/webseed/(.*)$ /web-videos/$1 break;
           '';
         };
-
-        extraConfig = lib.optionalString cfg.enableWebHttps ''
-          add_header Strict-Transport-Security          'max-age=63072000; includeSubDomains';
-        '';
       };
     };
 
@@ -848,7 +844,7 @@ in {
           home = cfg.package;
         };
       })
-      (lib.attrsets.setAttrByPath [ cfg.user "packages" ] [ cfg.package peertubeEnv peertubeCli pkgs.ffmpeg pkgs.nodejs_18 pkgs.yarn ])
+      (lib.attrsets.setAttrByPath [ cfg.user "packages" ] [ peertubeEnv pkgs.nodejs_18 pkgs.yarn pkgs.ffmpeg-headless ])
       (lib.mkIf cfg.redis.enableUnixSocket {${config.services.peertube.user}.extraGroups = [ "redis-peertube" ];})
     ];
 
diff --git a/nixpkgs/nixos/modules/services/web-apps/pict-rs.md b/nixpkgs/nixos/modules/services/web-apps/pict-rs.md
index 2fa6bb3aebce..56c51e0d7259 100644
--- a/nixpkgs/nixos/modules/services/web-apps/pict-rs.md
+++ b/nixpkgs/nixos/modules/services/web-apps/pict-rs.md
 pict-rs is a simple image hosting service.
 the minimum to start pict-rs is
 
 ```nix
-services.pict-rs.enable = true;
+{
+  services.pict-rs.enable = true;
+}
 ```
 
 this will start the http server on port 8080 by default.
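
The listen address and port can be adjusted through the module's options; a sketch, assuming the module exposes `services.pict-rs.address` and `services.pict-rs.port` (check the option reference of your NixOS release):

```nix
{
  services.pict-rs = {
    enable = true;
    # Assumed option names; verify against the pict-rs module options.
    address = "127.0.0.1";
    port = 8080;
  };
}
```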
diff --git a/nixpkgs/nixos/modules/services/web-apps/plausible.md b/nixpkgs/nixos/modules/services/web-apps/plausible.md
index 1328ce69441a..d3673eabddd4 100644
--- a/nixpkgs/nixos/modules/services/web-apps/plausible.md
+++ b/nixpkgs/nixos/modules/services/web-apps/plausible.md
@@ -11,7 +11,7 @@ $ openssl rand -base64 64
 ```
 
 After that, `plausible` can be deployed like this:
-```
+```nix
 {
   services.plausible = {
     enable = true;
diff --git a/nixpkgs/nixos/modules/services/web-apps/pretix.nix b/nixpkgs/nixos/modules/services/web-apps/pretix.nix
index 2355f8c450a1..22ee9769aa92 100644
--- a/nixpkgs/nixos/modules/services/web-apps/pretix.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/pretix.nix
@@ -63,7 +63,7 @@ in
   };
 
   options.services.pretix = {
-    enable = mkEnableOption "pretix";
+    enable = mkEnableOption "Pretix, a ticket shop application for conferences, festivals, concerts, etc.";
 
     package = mkPackageOption pkgs "pretix" { };
 
diff --git a/nixpkgs/nixos/modules/services/web-apps/slskd.nix b/nixpkgs/nixos/modules/services/web-apps/slskd.nix
index 580f66ec3ac9..15a5fd1177ad 100644
--- a/nixpkgs/nixos/modules/services/web-apps/slskd.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/slskd.nix
@@ -2,120 +2,248 @@
 
 let
   settingsFormat = pkgs.formats.yaml {};
+  defaultUser = "slskd";
 in {
   options.services.slskd = with lib; with types; {
     enable = mkEnableOption "enable slskd";
 
-    rotateLogs = mkEnableOption "enable an unit and timer that will rotate logs in /var/slskd/logs";
+    package = mkPackageOptionMD pkgs "slskd" { };
 
-    package = mkPackageOption pkgs "slskd" { };
+    user = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = "User account under which slskd runs.";
+    };
 
-    nginx = mkOption {
-      description = lib.mdDoc "options for nginx";
-      example = {
-        enable = true;
-        domain = "example.com";
-        contextPath = "/slskd";
-      };
-      type = submodule ({name, config, ...}: {
-        options = {
-          enable = mkEnableOption "enable nginx as a reverse proxy";
+    group = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = "Group under which slskd runs.";
+    };
 
-          domainName = mkOption {
-            type = str;
-            description = "Domain you want to use";
-          };
-          contextPath = mkOption {
-            type = types.path;
-            default = "/";
-            description = lib.mdDoc ''
-              The context path, i.e., the last part of the slskd
-              URL. Typically '/' or '/slskd'. Default '/'
-            '';
-          };
-        };
-      });
+    domain = mkOption {
+      type = types.nullOr types.str;
+      description = ''
+        If non-null, enables an nginx reverse proxy virtual host at this FQDN,
+        at the path configured with `services.slskd.web.url_base`.
+      '';
+      example = "slskd.example.com";
+    };
+
+    nginx = mkOption {
+      type = types.submodule (import ../web-servers/nginx/vhost-options.nix { inherit config lib; });
+      default = {};
+      example = lib.literalExpression ''
+        {
+          enableACME = true;
+          forceSSL = true;
+        }
+      '';
+      description = ''
+        This option customizes the nginx virtual host set up for slskd.
+      '';
     };
 
     environmentFile = mkOption {
       type = path;
       description = ''
-        Path to a file containing secrets.
-        It must at least contain the variable `SLSKD_SLSK_PASSWORD`
+        Path to the environment file sourced on startup.
+        It must at least contain the variables `SLSKD_SLSK_USERNAME` and `SLSKD_SLSK_PASSWORD`.
+        Web interface credentials should also be set here in `SLSKD_USERNAME` and `SLSKD_PASSWORD`.
+        Other, optional credentials like SOCKS5 with `SLSKD_SLSK_PROXY_USERNAME` and `SLSKD_SLSK_PROXY_PASSWORD`
+        should all reside here instead of in the world-readable nix store.
+        Variables are documented at https://github.com/slskd/slskd/blob/master/docs/config.md
       '';
     };
 
     openFirewall = mkOption {
       type = bool;
-      description = ''
-        Whether to open the firewall for services.slskd.settings.listen_port";
-      '';
+      description = "Whether to open the firewall for the soulseek network listen port (not the web interface port).";
       default = false;
     };
 
     settings = mkOption {
-      description = lib.mdDoc ''
-        Configuration for slskd, see
-        [available options](https://github.com/slskd/slskd/blob/master/docs/config.md)
-        `APP_DIR` is set to /var/lib/slskd, where default download & incomplete directories,
-        log and databases will be created.
+      description = ''
+        Application configuration for slskd. See
+        [documentation](https://github.com/slskd/slskd/blob/master/docs/config.md).
       '';
       default = {};
       type = submodule {
         freeformType = settingsFormat.type;
         options = {
+          remote_file_management = mkEnableOption "modification of share contents through the web UI";
+
+          flags = {
+            force_share_scan = mkOption {
+              type = bool;
+              description = "Force a rescan of shares on every startup.";
+            };
+            no_version_check = mkOption {
+              type = bool;
+              default = true;
+              visible = false;
+              description = "Don't perform a version check on startup.";
+            };
+          };
+
+          directories = {
+            incomplete = mkOption {
+              type = nullOr path;
+              description = "Directory where incomplete downloading files are stored.";
+              defaultText = "/var/lib/slskd/incomplete";
+              default = null;
+            };
+            downloads = mkOption {
+              type = nullOr path;
+              description = "Directory where downloaded files are stored.";
+              defaultText = "/var/lib/slskd/downloads";
+              default = null;
+            };
+          };
+
+          shares = {
+            directories = mkOption {
+              type = listOf str;
+              description = ''
+                Paths to shared directories. See
+                [documentation](https://github.com/slskd/slskd/blob/master/docs/config.md#directories)
+                for advanced usage.
+              '';
+              example = lib.literalExpression ''[ "/home/John/Music" "!/home/John/Music/Recordings" "[Music Drive]/mnt" ]'';
+            };
+            filters = mkOption {
+              type = listOf str;
+              example = lib.literalExpression ''[ "\.ini$" "Thumbs.db$" "\.DS_Store$" ]'';
+              description = "Regular expressions of files to exclude from sharing.";
+            };
+          };
+
+          rooms = mkOption {
+            type = listOf str;
+            description = "Chat rooms to join on startup.";
+          };
 
           soulseek = {
-            username = mkOption {
+            description = mkOption {
               type = str;
-              description = "Username on the Soulseek Network";
+              description = "The user description for the Soulseek network.";
+              defaultText = "A slskd user. https://github.com/slskd/slskd";
             };
             listen_port = mkOption {
               type = port;
-              description = "Port to use for communication on the Soulseek Network";
-              default = 50000;
+              description = "The port on which to listen for incoming connections.";
+              default = 50300;
             };
           };
 
+          global = {
+            # TODO speed units
+            upload = {
+              slots = mkOption {
+                type = ints.unsigned;
+                description = "Limit of the number of concurrent upload slots.";
+              };
+              speed_limit = mkOption {
+                type = ints.unsigned;
+                description = "Total upload speed limit.";
+              };
+            };
+            download = {
+              slots = mkOption {
+                type = ints.unsigned;
+                description = "Limit of the number of concurrent download slots.";
+              };
+              speed_limit = mkOption {
+                type = ints.unsigned;
+                description = "Total upload download limit";
+              };
+            };
+          };
+
+          filters.search.request = mkOption {
+            type = listOf str;
+            example = lib.literalExpression ''[ "^.{1,2}$" ]'';
+            description = "Incoming search requests which match this filter are ignored.";
+          };
+
           web = {
             port = mkOption {
               type = port;
-              default = 5001;
-              description = "The HTTP listen port";
+              default = 5030;
+              description = "The HTTP listen port.";
             };
             url_base = mkOption {
               type = path;
-              default = config.services.slskd.nginx.contextPath;
-              defaultText = "config.services.slskd.nginx.contextPath";
-              description = lib.mdDoc ''
-                The context path, i.e., the last part of the slskd URL
-              '';
+              default = "/";
+              description = "The base path in the url for web requests.";
+            };
+            # Users should use a reverse proxy instead for https
+            https.disabled = mkOption {
+              type = bool;
+              default = true;
+              description = "Disable the built-in HTTPS server";
             };
           };
 
-          shares = {
-            directories = mkOption {
-              type = listOf str;
-              description = lib.mdDoc ''
-                Paths to your shared directories. See
-                [documentation](https://github.com/slskd/slskd/blob/master/docs/config.md#directories)
-                for advanced usage
-              '';
+          retention = {
+            transfers = {
+              upload = {
+                succeeded = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of succeeded upload tasks.";
+                  defaultText = "(indefinite)";
+                };
+                errored = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of errored upload tasks.";
+                  defaultText = "(indefinite)";
+                };
+                cancelled = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of cancelled upload tasks.";
+                  defaultText = "(indefinite)";
+                };
+              };
+              download = {
+                succeeded = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of succeeded download tasks.";
+                  defaultText = "(indefinite)";
+                };
+                errored = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of errored download tasks.";
+                  defaultText = "(indefinite)";
+                };
+                cancelled = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of cancelled download tasks.";
+                  defaultText = "(indefinite)";
+                };
+              };
+            };
+            files = {
+              complete = mkOption {
+                type = ints.unsigned;
+                description = "Lifespan of completely downloaded files in minutes.";
+                example = 20160;
+                defaultText = "(indefinite)";
+              };
+              incomplete = mkOption {
+                type = ints.unsigned;
+                description = "Lifespan of incomplete downloading files in minutes.";
+                defaultText = "(indefinite)";
+              };
             };
           };
 
-          directories = {
-            incomplete = mkOption {
-              type = nullOr path;
-              description = "Directory where downloading files are stored";
-              defaultText = "<APP_DIR>/incomplete";
-              default = null;
-            };
-            downloads = mkOption {
-              type = nullOr path;
-              description = "Directory where downloaded files are stored";
-              defaultText = "<APP_DIR>/downloads";
-              default = null;
+          logger = {
+            # Disable by default, journald already retains as needed
+            disk = mkOption {
+              type = bool;
+              description = "Whether to log to the application directory.";
+              default = false;
+              visible = false;
             };
           };
         };
@@ -126,51 +254,42 @@ in {
   config = let
     cfg = config.services.slskd;
 
-    confWithoutNullValues = (lib.filterAttrs (key: value: value != null) cfg.settings);
+    confWithoutNullValues = (lib.filterAttrsRecursive (key: value: (builtins.tryEval value).success && value != null) cfg.settings);
 
     configurationYaml = settingsFormat.generate "slskd.yml" confWithoutNullValues;
 
   in lib.mkIf cfg.enable {
 
-    users = {
-      users.slskd = {
+    # Force off, configuration file is in nix store and is immutable
+    services.slskd.settings.remote_configuration = lib.mkForce false;
+
+    users.users = lib.optionalAttrs (cfg.user == defaultUser) {
+      "${defaultUser}" = {
+        group = cfg.group;
         isSystemUser = true;
-        group = "slskd";
       };
-      groups.slskd = {};
     };
 
-    # Reverse proxy configuration
-    services.nginx.enable = true;
-    services.nginx.virtualHosts."${cfg.nginx.domainName}" = {
-      forceSSL = true;
-      enableACME = true;
-      locations = {
-        "${cfg.nginx.contextPath}" = {
-          proxyPass = "http://localhost:${toString cfg.settings.web.port}";
-          proxyWebsockets = true;
-        };
-      };
+    users.groups = lib.optionalAttrs (cfg.group == defaultUser) {
+      "${defaultUser}" = {};
     };
 
-    # Hide state & logs
-    systemd.tmpfiles.rules = [
-      "d /var/lib/slskd/data 0750 slskd slskd - -"
-      "d /var/lib/slskd/logs 0750 slskd slskd - -"
-    ];
-
     systemd.services.slskd = {
       description = "A modern client-server application for the Soulseek file sharing network";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         Type = "simple";
-        User = "slskd";
+        User = cfg.user;
+        Group = cfg.group;
         EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
-        StateDirectory = "slskd";
+        StateDirectory = "slskd";  # Creates /var/lib/slskd and manages permissions
         ExecStart = "${cfg.package}/bin/slskd --app-dir /var/lib/slskd --config ${configurationYaml}";
         Restart = "on-failure";
         ReadOnlyPaths = map (d: builtins.elemAt (builtins.split "[^/]*(/.+)" d) 1) cfg.settings.shares.directories;
+        ReadWritePaths =
+          (lib.optional (cfg.settings.directories.incomplete != null) cfg.settings.directories.incomplete) ++
+          (lib.optional (cfg.settings.directories.downloads != null) cfg.settings.directories.downloads);
         LockPersonality = true;
         NoNewPrivileges = true;
         PrivateDevices = true;
@@ -194,18 +313,21 @@ in {
 
     networking.firewall.allowedTCPPorts = lib.optional cfg.openFirewall cfg.settings.soulseek.listen_port;
 
-    systemd.services.slskd-rotatelogs = lib.mkIf cfg.rotateLogs {
-      description = "Rotate slskd logs";
-      serviceConfig = {
-        Type = "oneshot";
-        User = "slskd";
-        ExecStart = [
-          "${pkgs.findutils}/bin/find /var/lib/slskd/logs/ -type f -mtime +10 -delete"
-          "${pkgs.findutils}/bin/find /var/lib/slskd/logs/ -type f -mtime +1  -exec ${pkgs.gzip}/bin/gzip -q {} ';'"
-        ];
-      };
-      startAt = "daily";
+    services.nginx = lib.mkIf (cfg.domain != null) {
+      enable = lib.mkDefault true;
+      virtualHosts."${cfg.domain}" = lib.mkMerge [
+        cfg.nginx
+        {
+          locations."${cfg.settings.web.url_base}" = {
+            proxyPass = "http://127.0.0.1:${toString cfg.settings.web.port}";
+            proxyWebsockets = true;
+          };
+        }
+      ];
     };
+  };
 
+  meta = {
+    maintainers = with lib.maintainers; [ ppom melvyn2 ];
   };
 }
diff --git a/nixpkgs/nixos/modules/services/web-apps/suwayomi-server.md b/nixpkgs/nixos/modules/services/web-apps/suwayomi-server.md
index 18e7a631443f..2185556a8721 100644
--- a/nixpkgs/nixos/modules/services/web-apps/suwayomi-server.md
+++ b/nixpkgs/nixos/modules/services/web-apps/suwayomi-server.md
@@ -100,7 +100,7 @@ Not all the configuration options are available directly in this module, but you
       server = {
         port = 4567;
         autoDownloadNewChapters = false;
-        maxSourcesInParallel" = 6;
+        maxSourcesInParallel = 6;
         extensionRepos = [
           "https://raw.githubusercontent.com/MY_ACCOUNT/MY_REPO/repo/index.min.json"
         ];
diff --git a/nixpkgs/nixos/modules/services/web-servers/garage.md b/nixpkgs/nixos/modules/services/web-servers/garage.md
index 3a9b85ce0603..fbefd1914d87 100644
--- a/nixpkgs/nixos/modules/services/web-servers/garage.md
+++ b/nixpkgs/nixos/modules/services/web-servers/garage.md
@@ -80,7 +80,7 @@ If major-releases will be abandoned by upstream, we should check first if those
 in NixOS for a safe upgrade-path before removing those. In that case we should keep those
 packages, but mark them as insecure in an expression like this (in
 `<nixpkgs/pkgs/tools/filesystem/garage/default.nix>`):
-```
+```nix
 /* ... */
 {
   garage_0_7_3 = generic {
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md
index aa36f66970ec..2b4bd06df04f 100644
--- a/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md
@@ -8,9 +8,11 @@ All of the core apps, optional apps, games, and core developer tools from GNOME
 
 To enable the GNOME desktop use:
 
-```
-services.xserver.desktopManager.gnome.enable = true;
-services.xserver.displayManager.gdm.enable = true;
+```nix
+{
+  services.xserver.desktopManager.gnome.enable = true;
+  services.xserver.displayManager.gdm.enable = true;
+}
 ```
 
 ::: {.note}
@@ -23,8 +25,10 @@ The default applications used in NixOS are very minimal, inspired by the default
 
 If you’d like to only use the GNOME desktop and not the apps, you can disable them with:
 
-```
-services.gnome.core-utilities.enable = false;
+```nix
+{
+  services.gnome.core-utilities.enable = false;
+}
 ```
 
 and none of them will be installed.
@@ -37,9 +41,11 @@ Note that this mechanism can only exclude core utilities, games and core develop
 
 It is also possible to disable many of the [core services](https://github.com/NixOS/nixpkgs/blob/b8ec4fd2a4edc4e30d02ba7b1a2cc1358f3db1d5/nixos/modules/services/x11/desktop-managers/gnome.nix#L329-L348). For example, if you do not need indexing files, you can disable Tracker with:
 
-```
-services.gnome.tracker-miners.enable = false;
-services.gnome.tracker.enable = false;
+```nix
+{
+  services.gnome.tracker-miners.enable = false;
+  services.gnome.tracker.enable = false;
+}
 ```
 
 Note, however, that doing so is not supported and might break some applications. Notably, GNOME Music cannot work without Tracker.
@@ -48,39 +54,47 @@ Note, however, that doing so is not supported and might break some applications.
 
 You can install all of the GNOME games with:
 
-```
-services.gnome.games.enable = true;
+```nix
+{
+  services.gnome.games.enable = true;
+}
 ```
 
 ### GNOME core developer tools {#sec-gnome-core-developer-tools}
 
 You can install GNOME core developer tools with:
 
-```
-services.gnome.core-developer-tools.enable = true;
+```nix
+{
+  services.gnome.core-developer-tools.enable = true;
+}
 ```
 
 ## Enabling GNOME Flashback {#sec-gnome-enable-flashback}
 
 GNOME Flashback provides a desktop environment based on the classic GNOME 2 architecture. You can enable the default GNOME Flashback session, which uses the Metacity window manager, with:
 
-```
-services.xserver.desktopManager.gnome.flashback.enableMetacity = true;
+```nix
+{
+  services.xserver.desktopManager.gnome.flashback.enableMetacity = true;
+}
 ```
 
 It is also possible to create custom sessions that replace Metacity with a different window manager using [](#opt-services.xserver.desktopManager.gnome.flashback.customSessions).
 
 The following example uses the `xmonad` window manager:
 
-```
-services.xserver.desktopManager.gnome.flashback.customSessions = [
-  {
-    wmName = "xmonad";
-    wmLabel = "XMonad";
-    wmCommand = "${pkgs.haskellPackages.xmonad}/bin/xmonad";
-    enableGnomePanel = false;
-  }
-];
+```nix
+{
+  services.xserver.desktopManager.gnome.flashback.customSessions = [
+    {
+      wmName = "xmonad";
+      wmLabel = "XMonad";
+      wmCommand = "${pkgs.haskellPackages.xmonad}/bin/xmonad";
+      enableGnomePanel = false;
+    }
+  ];
+}
 ```
 
 ## Icons and GTK Themes {#sec-gnome-icons-and-gtk-themes}
@@ -104,12 +118,14 @@ Some packages that include Shell extensions, like `gnome.gpaste`, don’t have t
 
 You can install them like any other package:
 
-```
-environment.systemPackages = [
-  gnomeExtensions.dash-to-dock
-  gnomeExtensions.gsconnect
-  gnomeExtensions.mpris-indicator-button
-];
+```nix
+{
+  environment.systemPackages = [
+    gnomeExtensions.dash-to-dock
+    gnomeExtensions.gsconnect
+    gnomeExtensions.mpris-indicator-button
+  ];
+}
 ```
 
 Unfortunately, we lack a way for these to be managed in a completely declarative way.
@@ -136,23 +152,25 @@ You can use `dconf-editor` tool to explore which GSettings you can set.
 
 ### Example {#sec-gnome-gsettings-overrides-example}
 
-```
-services.xserver.desktopManager.gnome = {
-  extraGSettingsOverrides = ''
-    # Change default background
-    [org.gnome.desktop.background]
-    picture-uri='file://${pkgs.nixos-artwork.wallpapers.mosaic-blue.gnomeFilePath}'
-
-    # Favorite apps in gnome-shell
-    [org.gnome.shell]
-    favorite-apps=['org.gnome.Console.desktop', 'org.gnome.Nautilus.desktop']
-  '';
-
-  extraGSettingsOverridePackages = [
-    pkgs.gsettings-desktop-schemas # for org.gnome.desktop
-    pkgs.gnome.gnome-shell # for org.gnome.shell
-  ];
-};
+```nix
+{
+  services.xserver.desktopManager.gnome = {
+    extraGSettingsOverrides = ''
+      # Change default background
+      [org.gnome.desktop.background]
+      picture-uri='file://${pkgs.nixos-artwork.wallpapers.mosaic-blue.gnomeFilePath}'
+
+      # Favorite apps in gnome-shell
+      [org.gnome.shell]
+      favorite-apps=['org.gnome.Console.desktop', 'org.gnome.Nautilus.desktop']
+    '';
+
+    extraGSettingsOverridePackages = [
+      pkgs.gsettings-desktop-schemas # for org.gnome.desktop
+      pkgs.gnome.gnome-shell # for org.gnome.shell
+    ];
+  };
+}
 ```
 
 ## Frequently Asked Questions {#sec-gnome-faq}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md b/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md
index 1c14ede84749..ce251ec2d394 100644
--- a/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md
@@ -5,17 +5,23 @@ Pantheon is the desktop environment created for the elementary OS distribution.
 ## Enabling Pantheon {#sec-pantheon-enable}
 
 All of Pantheon is working in NixOS and the applications should be available, aside from a few [exceptions](https://github.com/NixOS/nixpkgs/issues/58161). To enable Pantheon, set
-```
-services.xserver.desktopManager.pantheon.enable = true;
+```nix
+{
+  services.xserver.desktopManager.pantheon.enable = true;
+}
 ```
 This automatically enables LightDM and Pantheon's LightDM greeter. If you'd like to disable this, set
-```
-services.xserver.displayManager.lightdm.greeters.pantheon.enable = false;
-services.xserver.displayManager.lightdm.enable = false;
+```nix
+{
+  services.xserver.displayManager.lightdm.greeters.pantheon.enable = false;
+  services.xserver.displayManager.lightdm.enable = false;
+}
 ```
 but please be aware that using Pantheon without LightDM as the display manager will break screen locking from the UI. The NixOS module for Pantheon installs all of Pantheon's default applications. If you'd prefer not to install Pantheon's apps, set
-```
-services.pantheon.apps.enable = false;
+```nix
+{
+  services.pantheon.apps.enable = false;
+}
 ```
 You can also use [](#opt-environment.pantheon.excludePackages) to remove any other app (like `elementary-mail`).
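As a minimal sketch (assuming `pkgs.pantheon.elementary-mail` is the package attribute for that app; the choice of app is purely illustrative), excluding a single application could look like:
```nix
{
  # Hypothetical example: drop elementary-mail from the default Pantheon app set.
  environment.pantheon.excludePackages = [ pkgs.pantheon.elementary-mail ];
}
```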
 
@@ -29,30 +35,33 @@ Wingpanel and Switchboard work differently than they do in other distributions,
 to configure the programs with plugs or indicators.
 
 The difference in NixOS is that both of these programs are patched to load plugins from a directory given by an environment variable, all of which is controlled in Nix. If you need to configure the particular packages manually, you can override the packages like this:
-```
+```nix
 wingpanel-with-indicators.override {
   indicators = [
     pkgs.some-special-indicator
   ];
-};
+}
 
+```
+```nix
 switchboard-with-plugs.override {
   plugs = [
     pkgs.some-special-plug
   ];
-};
+}
 ```
 Please note that, as the NixOS options describe these as extra plugins, this only adds to the default plugins included with the programs. If for some reason you'd like to configure exactly which plugins to use, both packages have an argument for this:
-```
+```nix
 wingpanel-with-indicators.override {
   useDefaultIndicators = false;
   indicators = specialListOfIndicators;
-};
-
+}
+```
+```nix
 switchboard-with-plugs.override {
   useDefaultPlugs = false;
   plugs = specialListOfPlugs;
-};
+}
 ```
 This could be most useful for testing a particular plug-in in isolation.
 
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix b/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix
index 0861530f21e8..3e7c6b01b3e9 100644
--- a/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix
@@ -251,7 +251,6 @@ in
 
     environment.systemPackages = [pkgs.xpra];
 
-    virtualisation.virtualbox.guest.x11 = false;
     hardware.pulseaudio.enable = mkDefault cfg.pulseaudio;
     hardware.pulseaudio.systemWide = mkDefault cfg.pulseaudio;
   };
diff --git a/nixpkgs/nixos/modules/services/x11/xserver.nix b/nixpkgs/nixos/modules/services/x11/xserver.nix
index 4e0235f9ad1d..453f414e2a86 100644
--- a/nixpkgs/nixos/modules/services/x11/xserver.nix
+++ b/nixpkgs/nixos/modules/services/x11/xserver.nix
@@ -303,7 +303,7 @@ in
         type = types.listOf types.str;
         default = [ "modesetting" "fbdev" ];
         example = [
-          "nvidia" "nvidiaLegacy390" "nvidiaLegacy340" "nvidiaLegacy304"
+          "nvidia"
           "amdgpu-pro"
         ];
         # TODO(@oxij): think how to easily add the rest, like those nvidia things
diff --git a/nixpkgs/nixos/modules/system/boot/clevis.md b/nixpkgs/nixos/modules/system/boot/clevis.md
index dcbf55de60a8..39edc0fc38df 100644
--- a/nixpkgs/nixos/modules/system/boot/clevis.md
+++ b/nixpkgs/nixos/modules/system/boot/clevis.md
@@ -39,13 +39,17 @@ For more complete documentation on how to generate a secret with clevis, see the
 
 In order to activate unattended decryption of a resource at boot, enable the `clevis` module:
 
-```
-boot.initrd.clevis.enable = true;
+```nix
+{
+  boot.initrd.clevis.enable = true;
+}
 ```
 
 Then, specify the device you want to decrypt using a given clevis secret. Clevis will automatically try to decrypt the device at boot and will fall back to interactive unlocking if the decryption policy is not fulfilled.
-```
-boot.initrd.clevis.devices."/dev/nvme0n1p1".secretFile = ./nvme0n1p1.jwe;
+```nix
+{
+  boot.initrd.clevis.devices."/dev/nvme0n1p1".secretFile = ./nvme0n1p1.jwe;
+}
 ```
 
 Only `bcachefs`, `zfs` and `luks` encrypted devices are supported at this time.
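Several devices can be declared at once. As a sketch (the device paths and secret file names below are purely illustrative), the same option written as a single attribute set:
```nix
{
  boot.initrd.clevis = {
    enable = true;
    devices = {
      # One JWE secret per encrypted device; adjust the paths to your layout.
      "/dev/nvme0n1p1".secretFile = ./nvme0n1p1.jwe;
      "/dev/nvme0n1p2".secretFile = ./nvme0n1p2.jwe;
    };
  };
}
```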
diff --git a/nixpkgs/nixos/modules/system/boot/initrd-ssh.nix b/nixpkgs/nixos/modules/system/boot/initrd-ssh.nix
index 61e61f32bc5e..43da2496d16c 100644
--- a/nixpkgs/nixos/modules/system/boot/initrd-ssh.nix
+++ b/nixpkgs/nixos/modules/system/boot/initrd-ssh.nix
@@ -93,6 +93,21 @@ in
       defaultText = literalExpression "config.users.users.root.openssh.authorizedKeys.keys";
       description = lib.mdDoc ''
         Authorized keys for the root user on initrd.
+        You can combine the `authorizedKeys` and `authorizedKeyFiles` options.
+      '';
+      example = [
+        "ssh-rsa AAAAB3NzaC1yc2etc/etc/etcjwrsh8e596z6J0l7 example@host"
+        "ssh-ed25519 AAAAC3NzaCetcetera/etceteraJZMfk3QPfQ foo@bar"
+      ];
+    };
+
+    authorizedKeyFiles = mkOption {
+      type = types.listOf types.path;
+      default = config.users.users.root.openssh.authorizedKeys.keyFiles;
+      defaultText = literalExpression "config.users.users.root.openssh.authorizedKeys.keyFiles";
+      description = lib.mdDoc ''
+        Authorized keys for the root user on initrd, read from the given files.
+        You can combine the `authorizedKeyFiles` and `authorizedKeys` options.
       '';
     };
 
@@ -152,7 +167,7 @@ in
   in mkIf enabled {
     assertions = [
       {
-        assertion = cfg.authorizedKeys != [];
+        assertion = cfg.authorizedKeys != [] || cfg.authorizedKeyFiles != [];
         message = "You should specify at least one authorized key for initrd SSH";
       }
 
@@ -206,6 +221,9 @@ in
       ${concatStrings (map (key: ''
         echo ${escapeShellArg key} >> /root/.ssh/authorized_keys
       '') cfg.authorizedKeys)}
+      ${concatStrings (map (keyFile: ''
+        cat ${keyFile} >> /root/.ssh/authorized_keys
+      '') cfg.authorizedKeyFiles)}
 
       ${flip concatMapStrings cfg.hostKeys (path: ''
         # keys from Nix store are world-readable, which sshd doesn't like
@@ -236,9 +254,13 @@ in
 
       users.root.shell = mkIf (config.boot.initrd.network.ssh.shell != null) config.boot.initrd.network.ssh.shell;
 
-      contents."/etc/ssh/authorized_keys.d/root".text =
-        concatStringsSep "\n" config.boot.initrd.network.ssh.authorizedKeys;
-      contents."/etc/ssh/sshd_config".text = sshdConfig;
+      contents = {
+        "/etc/ssh/sshd_config".text = sshdConfig;
+        "/etc/ssh/authorized_keys.d/root".text =
+          concatStringsSep "\n" (
+            config.boot.initrd.network.ssh.authorizedKeys ++
+            (map (file: lib.fileContents file) config.boot.initrd.network.ssh.authorizedKeyFiles));
+      };
       storePaths = ["${package}/bin/sshd"];
 
       services.sshd = {
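As a usage sketch of the new `authorizedKeyFiles` option next to the existing `authorizedKeys` one (the key string and file path below are placeholders), both sources end up merged into root's authorized_keys in the initrd:
```nix
{
  boot.initrd.network.ssh = {
    enable = true;
    # Inline keys and keys read from files are written to the same authorized_keys.
    authorizedKeys = [ "ssh-ed25519 AAAAC3Nza... admin@laptop" ];
    authorizedKeyFiles = [ ./keys/backup-operator.pub ];
  };
}
```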
diff --git a/nixpkgs/nixos/modules/system/boot/plymouth.nix b/nixpkgs/nixos/modules/system/boot/plymouth.nix
index 16bca40993ae..85f0fd4622df 100644
--- a/nixpkgs/nixos/modules/system/boot/plymouth.nix
+++ b/nixpkgs/nixos/modules/system/boot/plymouth.nix
@@ -4,7 +4,6 @@ with lib;
 
 let
 
-  inherit (pkgs) nixos-icons;
   plymouth = pkgs.plymouth.override {
     systemd = config.boot.initrd.systemd.package;
   };
@@ -97,8 +96,8 @@ in
       logo = mkOption {
         type = types.path;
         # Dimensions are 48x48 to match GDM logo
-        default = "${nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png";
-        defaultText = literalExpression ''"''${nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png"'';
+        default = "${pkgs.nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png";
+        defaultText = literalExpression ''"''${pkgs.nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png"'';
         example = literalExpression ''
           pkgs.fetchurl {
             url = "https://nixos.org/logo/nixos-hires.png";
@@ -107,6 +106,7 @@ in
         '';
         description = lib.mdDoc ''
           Logo which is displayed on the splash screen.
+          Currently, only the PNG file format is supported.
         '';
       };
 
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix b/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix
index e4f61db0cd02..06359f273846 100644
--- a/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix
+++ b/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix
@@ -392,7 +392,10 @@ in {
 
     boot.kernelParams = [
       "root=${config.boot.initrd.systemd.root}"
-    ] ++ lib.optional (config.boot.resumeDevice != "") "resume=${config.boot.resumeDevice}";
+    ] ++ lib.optional (config.boot.resumeDevice != "") "resume=${config.boot.resumeDevice}"
+      # `systemd` mounts root in initrd as read-only unless "rw" is on the kernel command line.
+      # For NixOS activation to succeed, we need to have root writable in initrd.
+      ++ lib.optional (config.boot.initrd.systemd.root == "gpt-auto") "rw";
 
     boot.initrd.systemd = {
       initrdBin = [pkgs.bash pkgs.coreutils cfg.package.kmod cfg.package];
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/envfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/envfs.nix
index 365cb46ff2fe..6719a03610d1 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/envfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/envfs.nix
@@ -7,6 +7,7 @@ let
       device = "none";
       fsType = "envfs";
       options = [
+        "bind-mount=/bin"
         "fallback-path=${pkgs.runCommand "fallback-path" {} (''
           mkdir -p $out
           ln -s ${config.environment.usrbinenv} $out/env
@@ -15,6 +16,9 @@ let
         "nofail"
       ];
     };
+    # We need to bind-mount /bin to /usr/bin, because otherwise upgrading
+    # from envfs < 1.0.5 would leave the old envfs in place without a /bin bind mount.
+    # Systemd is smart enough not to mount /bin if it is already mounted.
     "/bin" = {
       device = "/usr/bin";
       fsType = "none";
diff --git a/nixpkgs/nixos/modules/virtualisation/incus.nix b/nixpkgs/nixos/modules/virtualisation/incus.nix
index da7873c7bec8..1ceaa40cca9d 100644
--- a/nixpkgs/nixos/modules/virtualisation/incus.nix
+++ b/nixpkgs/nixos/modules/virtualisation/incus.nix
@@ -1,8 +1,80 @@
-{ config, lib, pkgs, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
 
 let
   cfg = config.virtualisation.incus;
   preseedFormat = pkgs.formats.yaml { };
+
+  serverBinPath = ''${pkgs.qemu_kvm}/libexec:${
+    lib.makeBinPath (
+      with pkgs;
+      [
+        cfg.package
+
+        acl
+        attr
+        bash
+        btrfs-progs
+        cdrkit
+        coreutils
+        criu
+        dnsmasq
+        e2fsprogs
+        findutils
+        getent
+        gnugrep
+        gnused
+        gnutar
+        gptfdisk
+        gzip
+        iproute2
+        iptables
+        kmod
+        lvm2
+        minio
+        nftables
+        qemu_kvm
+        qemu-utils
+        rsync
+        squashfsTools
+        systemd
+        thin-provisioning-tools
+        util-linux
+        virtiofsd
+        xz
+
+        (writeShellScriptBin "apparmor_parser" ''
+          exec '${apparmor-parser}/bin/apparmor_parser' -I '${apparmor-profiles}/etc/apparmor.d' "$@"
+        '')
+      ]
+      ++ lib.optionals config.boot.zfs.enabled [
+        config.boot.zfs.package
+        "${config.boot.zfs.package}/lib/udev"
+      ]
+      ++ lib.optionals config.virtualisation.vswitch.enable [ config.virtualisation.vswitch.package ]
+    )
+  }'';
+
+  # https://github.com/lxc/incus/blob/cff35a29ee3d7a2af1f937cbb6cf23776941854b/internal/server/instance/drivers/driver_qemu.go#L123
+  ovmf-prefix = if pkgs.stdenv.hostPlatform.isAarch64 then "AAVMF" else "OVMF";
+  ovmf = pkgs.linkFarm "incus-ovmf" [
+    {
+      name = "OVMF_CODE.4MB.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_CODE.fd";
+    }
+    {
+      name = "OVMF_VARS.4MB.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_VARS.fd";
+    }
+    {
+      name = "OVMF_VARS.4MB.ms.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_VARS.fd";
+    }
+  ];
 in
 {
   meta = {
@@ -11,26 +83,29 @@ in
 
   options = {
     virtualisation.incus = {
-      enable = lib.mkEnableOption (lib.mdDoc ''
+      enable = lib.mkEnableOption ''
         incusd, a daemon that manages containers and virtual machines.
 
         Users in the "incus-admin" group can interact with
         the daemon (e.g. to start or stop containers) using the
         {command}`incus` command line tool, among others.
-      '');
+      '';
 
       package = lib.mkPackageOption pkgs "incus" { };
 
       lxcPackage = lib.mkPackageOption pkgs "lxc" { };
 
+      clientPackage = lib.mkPackageOption pkgs [
+        "incus"
+        "client"
+      ] { };
+
       preseed = lib.mkOption {
-        type = lib.types.nullOr (
-          lib.types.submodule { freeformType = preseedFormat.type; }
-        );
+        type = lib.types.nullOr (lib.types.submodule { freeformType = preseedFormat.type; });
 
         default = null;
 
-        description = lib.mdDoc ''
+        description = ''
           Configuration for Incus preseed, see
           <https://linuxcontainers.org/incus/docs/main/howto/initialize/#non-interactive-configuration>
           for supported values.
@@ -80,18 +155,16 @@ in
         };
       };
 
-      socketActivation = lib.mkEnableOption (
-        lib.mdDoc ''
-          socket-activation for starting incus.service. Enabling this option
-          will stop incus.service from starting automatically on boot.
-        ''
-      );
+      socketActivation = lib.mkEnableOption (''
+        socket-activation for starting incus.service. Enabling this option
+        will stop incus.service from starting automatically on boot.
+      '');
 
       startTimeout = lib.mkOption {
         type = lib.types.ints.unsigned;
         default = 600;
         apply = toString;
-        description = lib.mdDoc ''
+        description = ''
           Time to wait (in seconds) for incusd to become ready to process requests.
           If incusd does not reply within the configured time, `incus.service` will be
           considered failed and systemd will attempt to restart it.
@@ -99,9 +172,12 @@ in
       };
 
       ui = {
-        enable = lib.mkEnableOption (lib.mdDoc "(experimental) Incus UI");
+        enable = lib.mkEnableOption "(experimental) Incus UI";
 
-        package = lib.mkPackageOption pkgs [ "incus" "ui" ] { };
+        package = lib.mkPackageOption pkgs [
+          "incus"
+          "ui"
+        ] { };
       };
     };
   };
@@ -109,7 +185,12 @@ in
   config = lib.mkIf cfg.enable {
     assertions = [
       {
-        assertion = !(config.networking.firewall.enable && !config.networking.nftables.enable && config.virtualisation.incus.enable);
+        assertion =
+          !(
+            config.networking.firewall.enable
+            && !config.networking.nftables.enable
+            && config.virtualisation.incus.enable
+          );
         message = "Incus on NixOS is unsupported using iptables. Set `networking.nftables.enable = true;`";
       }
     ];
@@ -137,7 +218,12 @@ in
       "vhost_vsock"
     ] ++ lib.optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
 
-    environment.systemPackages = [ cfg.package ];
+    environment.systemPackages = [
+      cfg.clientPackage
+
+      # gui console support
+      pkgs.spice-gtk
+    ];
 
     # Note: the following options are also declared in virtualisation.lxc, but
     # the latter can't be simply enabled to reuse the formers, because it
@@ -164,31 +250,24 @@ in
         "network-online.target"
         "lxcfs.service"
         "incus.socket"
-      ]
-        ++ lib.optional config.virtualisation.vswitch.enable "ovs-vswitchd.service";
+      ] ++ lib.optionals config.virtualisation.vswitch.enable [ "ovs-vswitchd.service" ];
 
       requires = [
         "lxcfs.service"
         "incus.socket"
-      ]
-        ++ lib.optional config.virtualisation.vswitch.enable "ovs-vswitchd.service";
-
-      wants = [
-        "network-online.target"
-      ];
+      ] ++ lib.optionals config.virtualisation.vswitch.enable [ "ovs-vswitchd.service" ];
 
-      path = lib.optionals config.boot.zfs.enabled [
-        config.boot.zfs.package
-        "${config.boot.zfs.package}/lib/udev"
-      ]
-        ++ lib.optional config.virtualisation.vswitch.enable config.virtualisation.vswitch.package;
+      wants = [ "network-online.target" ];
 
-      environment = lib.mkMerge [ {
-        # Override Path to the LXC template configuration directory
-        INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
-      } (lib.mkIf (cfg.ui.enable) {
-        "INCUS_UI" = cfg.ui.package;
-      }) ];
+      environment = lib.mkMerge [
+        {
+          INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
+          INCUS_OVMF_PATH = ovmf;
+          INCUS_USBIDS_PATH = "${pkgs.hwdata}/share/hwdata/usb.ids";
+          PATH = lib.mkForce serverBinPath;
+        }
+        (lib.mkIf (cfg.ui.enable) { "INCUS_UI" = cfg.ui.package; })
+      ];
 
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/incusd --group incus-admin";
@@ -222,15 +301,13 @@ in
     systemd.services.incus-preseed = lib.mkIf (cfg.preseed != null) {
       description = "Incus initialization with preseed file";
 
-      wantedBy = ["incus.service"];
-      after = ["incus.service"];
-      bindsTo = ["incus.service"];
-      partOf = ["incus.service"];
+      wantedBy = [ "incus.service" ];
+      after = [ "incus.service" ];
+      bindsTo = [ "incus.service" ];
+      partOf = [ "incus.service" ];
 
       script = ''
-        ${cfg.package}/bin/incus admin init --preseed <${
-          preseedFormat.generate "incus-preseed.yaml" cfg.preseed
-        }
+        ${cfg.package}/bin/incus admin init --preseed <${preseedFormat.generate "incus-preseed.yaml" cfg.preseed}
       '';
 
       serviceConfig = {
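As a sketch of how the reworked module might be enabled together with a non-interactive preseed (the network, storage-pool and profile entries follow the upstream preseed format linked from the option description and are assumptions for illustration, not values taken from this patch):
```nix
{
  networking.nftables.enable = true; # required by the assertion above
  virtualisation.incus = {
    enable = true;
    ui.enable = true; # optional, serves the (experimental) web UI
    preseed = {
      networks = [
        {
          name = "incusbr0";
          type = "bridge";
          config = {
            "ipv4.address" = "auto";
            "ipv6.address" = "auto";
          };
        }
      ];
      storage_pools = [
        {
          name = "default";
          driver = "dir";
        }
      ];
      profiles = [
        {
          name = "default";
          devices = {
            eth0 = {
              name = "eth0";
              network = "incusbr0";
              type = "nic";
            };
            root = {
              path = "/";
              pool = "default";
              type = "disk";
            };
          };
        }
      ];
    };
  };
}
```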
diff --git a/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix b/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
index 94f70c65436c..c2606968d3be 100644
--- a/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
+++ b/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
@@ -5,14 +5,32 @@
 with lib;
 
 let
-
   cfg = config.virtualisation.virtualbox.guest;
   kernel = config.boot.kernelPackages;
 
-in
+  mkVirtualBoxUserService = serviceArgs: {
+    description = "VirtualBox Guest User Services ${serviceArgs}";
 
-{
+    wantedBy = [ "graphical-session.target" ];
+    partOf = [ "graphical-session.target" ];
+
+    # The graphical session may not be ready when the service starts.
+    # Hence, check whether the DISPLAY env var is set; otherwise fail, wait, and retry.
+    startLimitBurst = 20;
+
+    unitConfig.ConditionVirtualization = "oracle";
 
+    # Check if the display environment is ready, otherwise fail
+    preStart = "${pkgs.bash}/bin/bash -c \"if [ -z $DISPLAY ]; then exit 1; fi\"";
+    serviceConfig = {
+      ExecStart = "@${kernel.virtualboxGuestAdditions}/bin/VBoxClient --foreground ${serviceArgs}";
+      # Wait after a failure, in the hope that the display environment is ready by the next attempt
+      RestartSec = 2;
+      Restart = "always";
+    };
+  };
+in
+{
   ###### interface
 
   options.virtualisation.virtualbox.guest = {
@@ -22,32 +40,45 @@ in
       description = lib.mdDoc "Whether to enable the VirtualBox service and other guest additions.";
     };
 
-    x11 = mkOption {
+    clipboard = mkOption {
       default = true;
       type = types.bool;
-      description = lib.mdDoc "Whether to enable x11 graphics";
+      description = lib.mdDoc "Whether to enable clipboard support.";
+    };
+
+    seamless = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc "Whether to enable seamless mode. When activated windows from the guest appear next to the windows of the host.";
+    };
+
+    draganddrop = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc "Whether to enable drag and drop support.";
     };
   };
 
   ###### implementation
 
-  config = mkIf cfg.enable (mkMerge [{
-    assertions = [{
-      assertion = pkgs.stdenv.hostPlatform.isx86;
-      message = "Virtualbox not currently supported on ${pkgs.stdenv.hostPlatform.system}";
-    }];
+  config = mkIf cfg.enable (mkMerge [
+    {
+      assertions = [{
+        assertion = pkgs.stdenv.hostPlatform.isx86;
+        message = "Virtualbox not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+      }];
 
-    environment.systemPackages = [ kernel.virtualboxGuestAdditions ];
+      environment.systemPackages = [ kernel.virtualboxGuestAdditions ];
 
-    boot.extraModulePackages = [ kernel.virtualboxGuestAdditions ];
+      boot.extraModulePackages = [ kernel.virtualboxGuestAdditions ];
 
-    boot.supportedFilesystems = [ "vboxsf" ];
-    boot.initrd.supportedFilesystems = [ "vboxsf" ];
+      boot.supportedFilesystems = [ "vboxsf" ];
+      boot.initrd.supportedFilesystems = [ "vboxsf" ];
 
-    users.groups.vboxsf.gid = config.ids.gids.vboxsf;
+      users.groups.vboxsf.gid = config.ids.gids.vboxsf;
 
-    systemd.services.virtualbox =
-      { description = "VirtualBox Guest Services";
+      systemd.services.virtualbox = {
+        description = "VirtualBox Guest Services";
 
         wantedBy = [ "multi-user.target" ];
         requires = [ "dev-vboxguest.device" ];
@@ -58,36 +89,27 @@ in
         serviceConfig.ExecStart = "@${kernel.virtualboxGuestAdditions}/bin/VBoxService VBoxService --foreground";
       };
 
-    services.udev.extraRules =
-      ''
-        # /dev/vboxuser is necessary for VBoxClient to work.  Maybe we
-        # should restrict this to logged-in users.
-        KERNEL=="vboxuser",  OWNER="root", GROUP="root", MODE="0666"
-
-        # Allow systemd dependencies on vboxguest.
-        SUBSYSTEM=="misc", KERNEL=="vboxguest", TAG+="systemd"
-      '';
-  } (mkIf cfg.x11 {
-    services.xserver.videoDrivers = [ "vmware" "virtualbox" "modesetting" ];
-
-    services.xserver.config =
-      ''
-        Section "InputDevice"
-          Identifier "VBoxMouse"
-          Driver "vboxmouse"
-        EndSection
-      '';
-
-    services.xserver.serverLayoutSection =
-      ''
-        InputDevice "VBoxMouse"
-      '';
-
-    services.xserver.displayManager.sessionCommands =
-      ''
-        PATH=${makeBinPath [ pkgs.gnugrep pkgs.which pkgs.xorg.xorgserver.out ]}:$PATH \
-          ${kernel.virtualboxGuestAdditions}/bin/VBoxClient-all
-      '';
-  })]);
-
+      services.udev.extraRules =
+        ''
+          # /dev/vboxuser is necessary for VBoxClient to work.  Maybe we
+          # should restrict this to logged-in users.
+          KERNEL=="vboxuser",  OWNER="root", GROUP="root", MODE="0666"
+
+          # Allow systemd dependencies on vboxguest.
+          SUBSYSTEM=="misc", KERNEL=="vboxguest", TAG+="systemd"
+        '';
+
+      systemd.user.services.virtualboxClientVmsvga = mkVirtualBoxUserService "--vmsvga-session";
+    }
+    (
+      mkIf cfg.clipboard {
+        systemd.user.services.virtualboxClientClipboard = mkVirtualBoxUserService "--clipboard";
+      }
+    )
+    (
+      mkIf cfg.seamless {
+        systemd.user.services.virtualboxClientSeamless = mkVirtualBoxUserService "--seamless";
+      }
+    )
+  ]);
 }
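As a usage sketch of the reworked guest options (the particular combination of values is only illustrative), clipboard sharing can be kept while the other per-user integrations are disabled:
```nix
{
  virtualisation.virtualbox.guest = {
    enable = true;
    clipboard = true;    # shared clipboard between host and guest
    seamless = false;    # no seamless-mode window integration
    draganddrop = false; # no drag-and-drop support
  };
}
```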