author     Alyssa Ross <hi@alyssa.is>  2024-04-10 20:43:08 +0200
committer  Alyssa Ross <hi@alyssa.is>  2024-04-10 20:43:08 +0200
commit     69bfdf2484041b9d242840c4e5017b4703383bb0 (patch)
tree       d8bdaa69e7990d7d6f09b594b3c425f742acd2d0 /nixpkgs/nixos
parent     c8aee4b4363b6bf905a521b05b7476960e8286c8 (diff)
parent     d8fe5e6c92d0d190646fb9f1056741a229980089 (diff)
Merge commit 'd8fe5e6c'
Conflicts:
	nixpkgs/pkgs/build-support/go/module.nix
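The conflicted file was resolved by hand before the merge was committed. A minimal sketch of how a merge like this is typically produced (the exact commands are not recorded in the commit):

```
# Merge the second parent; git stops on the conflicting file.
git merge d8fe5e6c
# Resolve the conflict manually, then stage it and record the merge.
$EDITOR nixpkgs/pkgs/build-support/go/module.nix
git add nixpkgs/pkgs/build-support/go/module.nix
git commit   # records both parents (c8aee4b4 and d8fe5e6c)
```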
Diffstat (limited to 'nixpkgs/nixos')
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/cleaning-store.chapter.md | 6
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/container-networking.section.md | 12
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/control-groups.chapter.md | 8
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/declarative-containers.section.md | 28
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/service-mgmt.chapter.md | 4
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/abstractions.section.md | 6
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/ad-hoc-network-config.section.md | 10
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/adding-custom-packages.section.md | 34
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/config-file.section.md | 62
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/customizing-packages.section.md | 52
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/declarative-packages.section.md | 4
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/file-systems.chapter.md | 10
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/firewall.section.md | 18
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/gpu-accel.chapter.md | 62
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/ipv4-config.section.md | 20
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/ipv6-config.section.md | 28
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/kubernetes.chapter.md | 30
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/linux-kernel.chapter.md | 54
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/luks-file-systems.section.md | 34
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/modularity.section.md | 10
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/network-manager.section.md | 16
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/overlayfs.section.md | 28
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles.chapter.md | 8
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/renaming-interfaces.section.md | 20
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/ssh.section.md | 10
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/subversion.chapter.md | 51
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/user-mgmt.chapter.md | 28
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/wayland.chapter.md | 6
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/wireless.section.md | 40
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/x-windows.chapter.md | 148
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/xfce.chapter.md | 22
-rw-r--r--  nixpkgs/nixos/doc/manual/development/activation-script.section.md | 16
-rw-r--r--  nixpkgs/nixos/doc/manual/development/assertions.section.md | 4
-rw-r--r--  nixpkgs/nixos/doc/manual/development/etc-overlay.section.md | 8
-rw-r--r--  nixpkgs/nixos/doc/manual/development/meta-attributes.section.md | 4
-rw-r--r--  nixpkgs/nixos/doc/manual/development/non-switchable-systems.section.md | 2
-rw-r--r--  nixpkgs/nixos/doc/manual/development/option-declarations.section.md | 42
-rw-r--r--  nixpkgs/nixos/doc/manual/development/option-def.section.md | 78
-rw-r--r--  nixpkgs/nixos/doc/manual/development/option-types.section.md | 122
-rw-r--r--  nixpkgs/nixos/doc/manual/development/settings-options.section.md | 42
-rw-r--r--  nixpkgs/nixos/doc/manual/development/unit-handling.section.md | 12
-rw-r--r--  nixpkgs/nixos/doc/manual/development/writing-modules.chapter.md | 8
-rw-r--r--  nixpkgs/nixos/doc/manual/development/writing-nixos-tests.section.md | 16
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/building-images-via-systemd-repart.chapter.md | 30
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/changing-config.chapter.md | 4
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installing-behind-a-proxy.section.md | 6
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installing-from-other-distro.section.md | 18
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installing-virtualbox-guest.section.md | 8
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/upgrading.chapter.md | 10
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1509.section.md | 8
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1703.section.md | 4
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2009.section.md | 34
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2205.section.md | 4
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2211.section.md | 10
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2305.section.md | 64
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md | 42
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2405.section.md | 38
-rw-r--r--  nixpkgs/nixos/lib/make-options-doc/default.nix | 16
-rw-r--r--  nixpkgs/nixos/lib/test-driver/test_driver/machine.py | 1
-rw-r--r--  nixpkgs/nixos/modules/config/users-groups.nix | 36
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/nvidia.nix | 13
-rw-r--r--  nixpkgs/nixos/modules/i18n/input-method/default.md | 70
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl | 2
-rw-r--r--  nixpkgs/nixos/modules/module-list.nix | 10
-rw-r--r--  nixpkgs/nixos/modules/profiles/all-hardware.nix | 9
-rw-r--r--  nixpkgs/nixos/modules/profiles/macos-builder.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/programs/digitalbitbox/default.md | 36
-rw-r--r--  nixpkgs/nixos/modules/programs/goldwarden.nix | 50
-rw-r--r--  nixpkgs/nixos/modules/programs/nix-ld.nix | 42
-rw-r--r--  nixpkgs/nixos/modules/programs/plotinus.md | 6
-rw-r--r--  nixpkgs/nixos/modules/programs/steam.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md | 8
-rw-r--r--  nixpkgs/nixos/modules/security/acme/default.md | 366
-rw-r--r--  nixpkgs/nixos/modules/services/audio/castopod.md | 20
-rw-r--r--  nixpkgs/nixos/modules/services/audio/roon-server.nix | 3
-rw-r--r--  nixpkgs/nixos/modules/services/backup/borgbackup.md | 31
-rw-r--r--  nixpkgs/nixos/modules/services/databases/foundationdb.md | 20
-rw-r--r--  nixpkgs/nixos/modules/services/databases/postgresql.md | 42
-rw-r--r--  nixpkgs/nixos/modules/services/databases/tigerbeetle.md | 8
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/flatpak.md | 8
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/flatpak.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix | 145
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix | 83
-rw-r--r--  nixpkgs/nixos/modules/services/development/athens.md | 8
-rw-r--r--  nixpkgs/nixos/modules/services/development/blackfire.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/development/livebook.md | 8
-rw-r--r--  nixpkgs/nixos/modules/services/editors/emacs.md | 22
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/mail/mailman.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/matrix/maubot.md | 90
-rw-r--r--  nixpkgs/nixos/modules/services/matrix/mjolnir.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/matrix/synapse.md | 6
-rw-r--r--  nixpkgs/nixos/modules/services/misc/anki-sync-server.md | 6
-rw-r--r--  nixpkgs/nixos/modules/services/misc/forgejo.md | 40
-rw-r--r--  nixpkgs/nixos/modules/services/misc/gitlab.md | 82
-rw-r--r--  nixpkgs/nixos/modules/services/misc/mollysocket.nix | 133
-rw-r--r--  nixpkgs/nixos/modules/services/misc/paperless.nix | 16
-rw-r--r--  nixpkgs/nixos/modules/services/misc/sourcehut/default.md | 8
-rw-r--r--  nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix | 6
-rw-r--r--  nixpkgs/nixos/modules/services/misc/weechat.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/certspotter.md | 48
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/goss.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/parsedmarc.md | 124
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md | 14
-rw-r--r--  nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/dnsproxy.nix | 106
-rw-r--r--  nixpkgs/nixos/modules/services/networking/firefox-syncserver.md | 24
-rw-r--r--  nixpkgs/nixos/modules/services/networking/microsocks.nix | 146
-rw-r--r--  nixpkgs/nixos/modules/services/networking/mosquitto.md | 73
-rw-r--r--  nixpkgs/nixos/modules/services/networking/netbird.md | 26
-rw-r--r--  nixpkgs/nixos/modules/services/networking/pleroma.md | 196
-rw-r--r--  nixpkgs/nixos/modules/services/networking/prosody.md | 60
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-control.nix | 69
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix | 64
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix | 74
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion-router.nix | 49
-rw-r--r--  nixpkgs/nixos/modules/services/networking/scion/scion.nix | 39
-rw-r--r--  nixpkgs/nixos/modules/services/networking/yggdrasil.md | 6
-rw-r--r--  nixpkgs/nixos/modules/services/search/meilisearch.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix | 35
-rw-r--r--  nixpkgs/nixos/modules/services/security/usbguard.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/video/photonvision.nix | 64
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/akkoma.md | 284
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/discourse.md | 228
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/gotosocial.md | 52
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/grocy.md | 6
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/kavita.nix | 63
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/keycloak.md | 24
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/lemmy.md | 14
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/nextcloud.md | 6
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/peertube.nix | 292
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/pict-rs.md | 4
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/plausible.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/pretix.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/slskd.nix | 326
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/suwayomi-server.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/garage.md | 2
-rw-r--r--  nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md | 110
-rw-r--r--  nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md | 37
-rw-r--r--  nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/x11/xserver.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/system/boot/clevis.md | 12
-rw-r--r--  nixpkgs/nixos/modules/system/boot/initrd-ssh.nix | 30
-rw-r--r--  nixpkgs/nixos/modules/system/boot/plymouth.nix | 6
-rw-r--r--  nixpkgs/nixos/modules/system/boot/systemd/initrd.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/envfs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/incus.nix | 165
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix | 120
-rw-r--r--  nixpkgs/nixos/release-combined.nix | 5
-rw-r--r--  nixpkgs/nixos/tests/all-tests.nix | 7
-rw-r--r--  nixpkgs/nixos/tests/armagetronad.nix | 12
-rw-r--r--  nixpkgs/nixos/tests/firefoxpwa.nix | 36
-rw-r--r--  nixpkgs/nixos/tests/goss.nix | 8
-rw-r--r--  nixpkgs/nixos/tests/incus/container.nix | 15
-rw-r--r--  nixpkgs/nixos/tests/incus/default.nix | 18
-rw-r--r--  nixpkgs/nixos/tests/incus/lxd-to-incus.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/k3s/multi-node.nix | 4
-rw-r--r--  nixpkgs/nixos/tests/k3s/single-node.nix | 4
-rw-r--r--  nixpkgs/nixos/tests/kavita.nix | 46
-rw-r--r--  nixpkgs/nixos/tests/make-test-python.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/mollysocket.nix | 27
-rw-r--r--  nixpkgs/nixos/tests/monetdb.nix | 77
-rw-r--r--  nixpkgs/nixos/tests/nix-ld.nix | 52
-rw-r--r--  nixpkgs/nixos/tests/nixos-rebuild-install-bootloader.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/photonvision.nix | 21
-rw-r--r--  nixpkgs/nixos/tests/scion/freestanding-deployment/README.rst | 12
-rw-r--r--  nixpkgs/nixos/tests/scion/freestanding-deployment/default.nix | 172
-rw-r--r--  nixpkgs/nixos/tests/scion/freestanding-deployment/topology1.json | 51
-rw-r--r--  nixpkgs/nixos/tests/scion/freestanding-deployment/topology2.json | 51
-rw-r--r--  nixpkgs/nixos/tests/scion/freestanding-deployment/topology3.json | 60
-rw-r--r--  nixpkgs/nixos/tests/scion/freestanding-deployment/topology4.json | 40
-rw-r--r--  nixpkgs/nixos/tests/scion/freestanding-deployment/topology5.json | 40
-rw-r--r--  nixpkgs/nixos/tests/systemd-user-linger.nix | 39
-rw-r--r--  nixpkgs/nixos/tests/tracee.nix | 93
-rw-r--r--  nixpkgs/nixos/tests/ustreamer.nix | 75
-rw-r--r--  nixpkgs/nixos/tests/web-apps/peertube.nix | 28
178 files changed, 4723 insertions, 2292 deletions
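The stat above can be regenerated from a local clone by diffing the merge against its first parent, limited to `nixpkgs/nixos` (a sketch, assuming both commits have been fetched):

```
git diff --stat c8aee4b4363b6bf905a521b05b7476960e8286c8 69bfdf2484041b9d242840c4e5017b4703383bb0 -- nixpkgs/nixos
```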
diff --git a/nixpkgs/nixos/doc/manual/administration/cleaning-store.chapter.md b/nixpkgs/nixos/doc/manual/administration/cleaning-store.chapter.md
index c9140d0869c7..4f404882055a 100644
--- a/nixpkgs/nixos/doc/manual/administration/cleaning-store.chapter.md
+++ b/nixpkgs/nixos/doc/manual/administration/cleaning-store.chapter.md
@@ -21,8 +21,10 @@ You can tell NixOS in `configuration.nix` to run this unit automatically
 at certain points in time, for instance, every night at 03:15:
 
 ```nix
-nix.gc.automatic = true;
-nix.gc.dates = "03:15";
+{
+  nix.gc.automatic = true;
+  nix.gc.dates = "03:15";
+}
 ```
 
 The commands above do not remove garbage collector roots, such as old
diff --git a/nixpkgs/nixos/doc/manual/administration/container-networking.section.md b/nixpkgs/nixos/doc/manual/administration/container-networking.section.md
index 0873768376cc..723cf211d872 100644
--- a/nixpkgs/nixos/doc/manual/administration/container-networking.section.md
+++ b/nixpkgs/nixos/doc/manual/administration/container-networking.section.md
@@ -26,9 +26,11 @@ host to rewrite container traffic to use your external IP address. This
 can be accomplished using the following configuration on the host:
 
 ```nix
-networking.nat.enable = true;
-networking.nat.internalInterfaces = ["ve-+"];
-networking.nat.externalInterface = "eth0";
+{
+  networking.nat.enable = true;
+  networking.nat.internalInterfaces = ["ve-+"];
+  networking.nat.externalInterface = "eth0";
+}
 ```
 
 where `eth0` should be replaced with the desired external interface.
@@ -38,7 +40,9 @@ If you are using Network Manager, you need to explicitly prevent it from
 managing container interfaces:
 
 ```nix
-networking.networkmanager.unmanaged = [ "interface-name:ve-*" ];
+{
+  networking.networkmanager.unmanaged = [ "interface-name:ve-*" ];
+}
 ```
 
 You may need to restart your system for the changes to take effect.
diff --git a/nixpkgs/nixos/doc/manual/administration/control-groups.chapter.md b/nixpkgs/nixos/doc/manual/administration/control-groups.chapter.md
index abe8dd80b5ab..8682236ca1a9 100644
--- a/nixpkgs/nixos/doc/manual/administration/control-groups.chapter.md
+++ b/nixpkgs/nixos/doc/manual/administration/control-groups.chapter.md
@@ -39,7 +39,9 @@ they were in the same cgroup, then the PostgreSQL process would get
 `configuration.nix`:
 
 ```nix
-systemd.services.httpd.serviceConfig.CPUShares = 512;
+{
+  systemd.services.httpd.serviceConfig.CPUShares = 512;
+}
 ```
 
 By default, every cgroup has 1024 CPU shares, so this will halve the CPU
@@ -52,7 +54,9 @@ limits can be specified in `configuration.nix`; for instance, to limit
 `httpd.service` to 512 MiB of RAM (excluding swap):
 
 ```nix
-systemd.services.httpd.serviceConfig.MemoryLimit = "512M";
+{
+  systemd.services.httpd.serviceConfig.MemoryLimit = "512M";
+}
 ```
 
 The command `systemd-cgtop` shows a continuously updated list of all
diff --git a/nixpkgs/nixos/doc/manual/administration/declarative-containers.section.md b/nixpkgs/nixos/doc/manual/administration/declarative-containers.section.md
index eaa50d3c663d..f16fa8332b51 100644
--- a/nixpkgs/nixos/doc/manual/administration/declarative-containers.section.md
+++ b/nixpkgs/nixos/doc/manual/administration/declarative-containers.section.md
@@ -5,13 +5,15 @@ You can also specify containers and their configuration in the host's
 shall be a container named `database` running PostgreSQL:
 
 ```nix
-containers.database =
-  { config =
-      { config, pkgs, ... }:
-      { services.postgresql.enable = true;
-      services.postgresql.package = pkgs.postgresql_14;
-      };
-  };
+{
+  containers.database =
+    { config =
+        { config, pkgs, ... }:
+        { services.postgresql.enable = true;
+        services.postgresql.package = pkgs.postgresql_14;
+        };
+    };
+}
 ```
 
 If you run `nixos-rebuild switch`, the container will be built. If the
@@ -25,11 +27,13 @@ cannot change the network configuration. You can give a container its
 own network as follows:
 
 ```nix
-containers.database = {
-  privateNetwork = true;
-  hostAddress = "192.168.100.10";
-  localAddress = "192.168.100.11";
-};
+{
+  containers.database = {
+    privateNetwork = true;
+    hostAddress = "192.168.100.10";
+    localAddress = "192.168.100.11";
+  };
+}
 ```
 
 This gives the container a private virtual Ethernet interface with IP
diff --git a/nixpkgs/nixos/doc/manual/administration/service-mgmt.chapter.md b/nixpkgs/nixos/doc/manual/administration/service-mgmt.chapter.md
index bc9bdbe3708b..49e8d801fb80 100644
--- a/nixpkgs/nixos/doc/manual/administration/service-mgmt.chapter.md
+++ b/nixpkgs/nixos/doc/manual/administration/service-mgmt.chapter.md
@@ -82,7 +82,9 @@ In order to enable a systemd *system* service with provided upstream
 package, use (e.g):
 
 ```nix
-systemd.packages = [ pkgs.packagekit ];
+{
+  systemd.packages = [ pkgs.packagekit ];
+}
 ```
 
 Usually NixOS modules written by the community do the above, plus take
diff --git a/nixpkgs/nixos/doc/manual/configuration/abstractions.section.md b/nixpkgs/nixos/doc/manual/configuration/abstractions.section.md
index bf26e4c51ed3..5bc44aa72245 100644
--- a/nixpkgs/nixos/doc/manual/configuration/abstractions.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/abstractions.section.md
@@ -47,9 +47,9 @@ You can write a `let` wherever an expression is allowed. Thus, you also could ha
 ```nix
 {
   services.httpd.virtualHosts =
-    let commonConfig = ...; in
-    { "blog.example.org" = (commonConfig // { ... })
-      "wiki.example.org" = (commonConfig // { ... })
+    let commonConfig = { /* ... */ }; in
+    { "blog.example.org" = (commonConfig // { /* ... */ });
+      "wiki.example.org" = (commonConfig // { /* ... */ });
     };
 }
 ```
diff --git a/nixpkgs/nixos/doc/manual/configuration/ad-hoc-network-config.section.md b/nixpkgs/nixos/doc/manual/configuration/ad-hoc-network-config.section.md
index 4478d77f361d..ecb06ad984a3 100644
--- a/nixpkgs/nixos/doc/manual/configuration/ad-hoc-network-config.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/ad-hoc-network-config.section.md
@@ -6,8 +6,10 @@ is useful for doing network configuration not covered by the existing NixOS
 modules. For instance, to statically configure an IPv6 address:
 
 ```nix
-networking.localCommands =
-  ''
-    ip -6 addr add 2001:610:685:1::1/64 dev eth0
-  '';
+{
+  networking.localCommands =
+    ''
+      ip -6 addr add 2001:610:685:1::1/64 dev eth0
+    '';
+}
 ```
diff --git a/nixpkgs/nixos/doc/manual/configuration/adding-custom-packages.section.md b/nixpkgs/nixos/doc/manual/configuration/adding-custom-packages.section.md
index 2340723e07c6..f9a5221d6c93 100644
--- a/nixpkgs/nixos/doc/manual/configuration/adding-custom-packages.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/adding-custom-packages.section.md
@@ -23,7 +23,9 @@ Then you write and test the package as described in the Nixpkgs manual.
 Finally, you add it to [](#opt-environment.systemPackages), e.g.
 
 ```nix
-environment.systemPackages = [ pkgs.my-package ];
+{
+  environment.systemPackages = [ pkgs.my-package ];
+}
 ```
 
 and you run `nixos-rebuild`, specifying your own Nixpkgs tree:
@@ -38,24 +40,28 @@ tree. For instance, here is how you specify a build of the
 `configuration.nix`:
 
 ```nix
-environment.systemPackages =
-  let
-    my-hello = with pkgs; stdenv.mkDerivation rec {
-      name = "hello-2.8";
-      src = fetchurl {
-        url = "mirror://gnu/hello/${name}.tar.gz";
-        hash = "sha256-5rd/gffPfa761Kn1tl3myunD8TuM+66oy1O7XqVGDXM=";
+{
+  environment.systemPackages =
+    let
+      my-hello = with pkgs; stdenv.mkDerivation rec {
+        name = "hello-2.8";
+        src = fetchurl {
+          url = "mirror://gnu/hello/${name}.tar.gz";
+          hash = "sha256-5rd/gffPfa761Kn1tl3myunD8TuM+66oy1O7XqVGDXM=";
+        };
       };
-    };
-  in
-  [ my-hello ];
+    in
+    [ my-hello ];
+}
 ```
 
 Of course, you can also move the definition of `my-hello` into a
 separate Nix expression, e.g.
 
 ```nix
-environment.systemPackages = [ (import ./my-hello.nix) ];
+{
+  environment.systemPackages = [ (import ./my-hello.nix) ];
+}
 ```
 
 where `my-hello.nix` contains:
@@ -88,7 +94,9 @@ section](#module-services-flatpak). AppImages will not run "as-is" on NixOS.
 First you need to install `appimage-run`: add to `/etc/nixos/configuration.nix`
 
 ```nix
-environment.systemPackages = [ pkgs.appimage-run ];
+{
+  environment.systemPackages = [ pkgs.appimage-run ];
+}
 ```
 
 Then instead of running the AppImage "as-is", run `appimage-run foo.appimage`.
diff --git a/nixpkgs/nixos/doc/manual/configuration/config-file.section.md b/nixpkgs/nixos/doc/manual/configuration/config-file.section.md
index b010026c5828..e213aae29ae3 100644
--- a/nixpkgs/nixos/doc/manual/configuration/config-file.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/config-file.section.md
@@ -5,7 +5,7 @@ The NixOS configuration file generally looks like this:
 ```nix
 { config, pkgs, ... }:
 
-{ option definitions
+{ /* option definitions */
 }
 ```
 
@@ -80,7 +80,9 @@ Strings
 :   Strings are enclosed in double quotes, e.g.
 
     ```nix
-    networking.hostName = "dexter";
+    {
+      networking.hostName = "dexter";
+    }
     ```
 
     Special characters can be escaped by prefixing them with a backslash
@@ -89,11 +91,13 @@ Strings
     Multi-line strings can be enclosed in *double single quotes*, e.g.
 
     ```nix
-    networking.extraHosts =
-      ''
-        127.0.0.2 other-localhost
-        10.0.0.1 server
-      '';
+    {
+      networking.extraHosts =
+        ''
+          127.0.0.2 other-localhost
+          10.0.0.1 server
+        '';
+    }
     ```
 
     The main difference is that it strips from each line a number of
@@ -108,8 +112,10 @@ Booleans
 :   These can be `true` or `false`, e.g.
 
     ```nix
-    networking.firewall.enable = true;
-    networking.firewall.allowPing = false;
+    {
+      networking.firewall.enable = true;
+      networking.firewall.allowPing = false;
+    }
     ```
 
 Integers
@@ -117,7 +123,9 @@ Integers
 :   For example,
 
     ```nix
-    boot.kernel.sysctl."net.ipv4.tcp_keepalive_time" = 60;
+    {
+      boot.kernel.sysctl."net.ipv4.tcp_keepalive_time" = 60;
+    }
     ```
 
     (Note that here the attribute name `net.ipv4.tcp_keepalive_time` is
@@ -132,11 +140,13 @@ Sets
     braces, as in the option definition
 
     ```nix
-    fileSystems."/boot" =
-      { device = "/dev/sda1";
-        fsType = "ext4";
-        options = [ "rw" "data=ordered" "relatime" ];
-      };
+    {
+      fileSystems."/boot" =
+        { device = "/dev/sda1";
+          fsType = "ext4";
+          options = [ "rw" "data=ordered" "relatime" ];
+        };
+    }
     ```
 
 Lists
@@ -145,13 +155,17 @@ Lists
     separated by whitespace, like this:
 
     ```nix
-    boot.kernelModules = [ "fuse" "kvm-intel" "coretemp" ];
+    {
+      boot.kernelModules = [ "fuse" "kvm-intel" "coretemp" ];
+    }
     ```
 
     List elements can be any other type, e.g. sets:
 
     ```nix
-    swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
+    {
+      swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
+    }
     ```
 
 Packages
@@ -161,12 +175,14 @@ Packages
     argument `pkgs`. Typical uses:
 
     ```nix
-    environment.systemPackages =
-      [ pkgs.thunderbird
-        pkgs.emacs
-      ];
-
-    services.postgresql.package = pkgs.postgresql_14;
+    {
+      environment.systemPackages =
+        [ pkgs.thunderbird
+          pkgs.emacs
+        ];
+
+      services.postgresql.package = pkgs.postgresql_14;
+    }
     ```
 
     The latter option definition changes the default PostgreSQL package
diff --git a/nixpkgs/nixos/doc/manual/configuration/customizing-packages.section.md b/nixpkgs/nixos/doc/manual/configuration/customizing-packages.section.md
index 76413b7d84fb..a524ef266eaf 100644
--- a/nixpkgs/nixos/doc/manual/configuration/customizing-packages.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/customizing-packages.section.md
@@ -16,18 +16,20 @@ Examples include:
 
 You can use them like this:
 ```nix
-environment.systemPackages = with pkgs; [
-  sl
-  (pass.withExtensions (subpkgs: with subpkgs; [
-    pass-audit
-    pass-otp
-    pass-genphrase
-  ]))
-  (python3.withPackages (subpkgs: with subpkgs; [
-      requests
-  ]))
-  cowsay
-];
+{
+  environment.systemPackages = with pkgs; [
+    sl
+    (pass.withExtensions (subpkgs: with subpkgs; [
+      pass-audit
+      pass-otp
+      pass-genphrase
+    ]))
+    (python3.withPackages (subpkgs: with subpkgs; [
+        requests
+    ]))
+    cowsay
+  ];
+}
 ```
 :::
 
@@ -38,7 +40,9 @@ dependency on GTK 2. If you want to build it against GTK 3, you can
 specify that as follows:
 
 ```nix
-environment.systemPackages = [ (pkgs.emacs.override { gtk = pkgs.gtk3; }) ];
+{
+  environment.systemPackages = [ (pkgs.emacs.override { gtk = pkgs.gtk3; }) ];
+}
 ```
 
 The function `override` performs the call to the Nix function that
@@ -58,12 +62,14 @@ of the package, such as the source code. For instance, if you want to
 override the source code of Emacs, you can say:
 
 ```nix
-environment.systemPackages = [
-  (pkgs.emacs.overrideAttrs (oldAttrs: {
-    name = "emacs-25.0-pre";
-    src = /path/to/my/emacs/tree;
-  }))
-];
+{
+  environment.systemPackages = [
+    (pkgs.emacs.overrideAttrs (oldAttrs: {
+      name = "emacs-25.0-pre";
+      src = /path/to/my/emacs/tree;
+    }))
+  ];
+}
 ```
 
 Here, `overrideAttrs` takes the Nix derivation specified by `pkgs.emacs`
@@ -80,9 +86,11 @@ two instances of the package. If you want to have everything depend on
 your customised instance, you can apply a *global* override as follows:
 
 ```nix
-nixpkgs.config.packageOverrides = pkgs:
-  { emacs = pkgs.emacs.override { gtk = pkgs.gtk3; };
-  };
+{
+  nixpkgs.config.packageOverrides = pkgs:
+    { emacs = pkgs.emacs.override { gtk = pkgs.gtk3; };
+    };
+}
 ```
 
 The effect of this definition is essentially equivalent to modifying the
diff --git a/nixpkgs/nixos/doc/manual/configuration/declarative-packages.section.md b/nixpkgs/nixos/doc/manual/configuration/declarative-packages.section.md
index 480e250da8c7..6cdd520dcf15 100644
--- a/nixpkgs/nixos/doc/manual/configuration/declarative-packages.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/declarative-packages.section.md
@@ -7,7 +7,9 @@ following line to `configuration.nix` enables the Mozilla Thunderbird
 email application:
 
 ```nix
-environment.systemPackages = [ pkgs.thunderbird ];
+{
+  environment.systemPackages = [ pkgs.thunderbird ];
+}
 ```
 
 The effect of this specification is that the Thunderbird package from
diff --git a/nixpkgs/nixos/doc/manual/configuration/file-systems.chapter.md b/nixpkgs/nixos/doc/manual/configuration/file-systems.chapter.md
index 3dfdd20ac33e..4bdd9c60e327 100644
--- a/nixpkgs/nixos/doc/manual/configuration/file-systems.chapter.md
+++ b/nixpkgs/nixos/doc/manual/configuration/file-systems.chapter.md
@@ -6,10 +6,12 @@ Ext4 file system on device `/dev/disk/by-label/data` onto the mount
 point `/data`:
 
 ```nix
-fileSystems."/data" =
-  { device = "/dev/disk/by-label/data";
-    fsType = "ext4";
-  };
+{
+  fileSystems."/data" =
+    { device = "/dev/disk/by-label/data";
+      fsType = "ext4";
+    };
+}
 ```
 
 This will create an entry in `/etc/fstab`, which will generate a
diff --git a/nixpkgs/nixos/doc/manual/configuration/firewall.section.md b/nixpkgs/nixos/doc/manual/configuration/firewall.section.md
index dbf0ffb9273e..9a71217944ee 100644
--- a/nixpkgs/nixos/doc/manual/configuration/firewall.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/firewall.section.md
@@ -5,14 +5,18 @@ and other unexpected packets. The firewall applies to both IPv4 and IPv6
 traffic. It is enabled by default. It can be disabled as follows:
 
 ```nix
-networking.firewall.enable = false;
+{
+  networking.firewall.enable = false;
+}
 ```
 
 If the firewall is enabled, you can open specific TCP ports to the
 outside world:
 
 ```nix
-networking.firewall.allowedTCPPorts = [ 80 443 ];
+{
+  networking.firewall.allowedTCPPorts = [ 80 443 ];
+}
 ```
 
 Note that TCP port 22 (ssh) is opened automatically if the SSH daemon is
@@ -22,10 +26,12 @@ enabled (`services.openssh.enable = true`). UDP ports can be opened through
 To open ranges of TCP ports:
 
 ```nix
-networking.firewall.allowedTCPPortRanges = [
-  { from = 4000; to = 4007; }
-  { from = 8000; to = 8010; }
-];
+{
+  networking.firewall.allowedTCPPortRanges = [
+    { from = 4000; to = 4007; }
+    { from = 8000; to = 8010; }
+  ];
+}
 ```
 
 Similarly, UDP port ranges can be opened through
diff --git a/nixpkgs/nixos/doc/manual/configuration/gpu-accel.chapter.md b/nixpkgs/nixos/doc/manual/configuration/gpu-accel.chapter.md
index aa63aec61669..3b98bdd97c68 100644
--- a/nixpkgs/nixos/doc/manual/configuration/gpu-accel.chapter.md
+++ b/nixpkgs/nixos/doc/manual/configuration/gpu-accel.chapter.md
@@ -55,9 +55,11 @@ supported through the rocmPackages.clr.icd package. Adding this package to
 enables OpenCL support:
 
 ```nix
-hardware.opengl.extraPackages = [
-  rocmPackages.clr.icd
-];
+{
+  hardware.opengl.extraPackages = [
+    rocmPackages.clr.icd
+  ];
+}
 ```
 
 ### Intel {#sec-gpu-accel-opencl-intel}
@@ -74,9 +76,11 @@ to enable OpenCL support. For example, for Gen8 and later GPUs, the following
 configuration can be used:
 
 ```nix
-hardware.opengl.extraPackages = [
-  intel-compute-runtime
-];
+{
+  hardware.opengl.extraPackages = [
+    intel-compute-runtime
+  ];
+}
 ```
 
 ## Vulkan {#sec-gpu-accel-vulkan}
@@ -141,20 +145,22 @@ makes amdvlk the default driver and hides radv and lavapipe from the device list
 A specific driver can be forced as follows:
 
 ```nix
-hardware.opengl.extraPackages = [
-  pkgs.amdvlk
-];
-
-# To enable Vulkan support for 32-bit applications, also add:
-hardware.opengl.extraPackages32 = [
-  pkgs.driversi686Linux.amdvlk
-];
-
-# Force radv
-environment.variables.AMD_VULKAN_ICD = "RADV";
-# Or
-environment.variables.VK_ICD_FILENAMES =
-  "/run/opengl-driver/share/vulkan/icd.d/radeon_icd.x86_64.json";
+{
+  hardware.opengl.extraPackages = [
+    pkgs.amdvlk
+  ];
+
+  # To enable Vulkan support for 32-bit applications, also add:
+  hardware.opengl.extraPackages32 = [
+    pkgs.driversi686Linux.amdvlk
+  ];
+
+  # Force radv
+  environment.variables.AMD_VULKAN_ICD = "RADV";
+  # Or
+  environment.variables.VK_ICD_FILENAMES =
+    "/run/opengl-driver/share/vulkan/icd.d/radeon_icd.x86_64.json";
+}
 ```
 
 ## VA-API {#sec-gpu-accel-va-api}
@@ -178,17 +184,21 @@ $ nix-shell -p libva-utils --run vainfo
 Modern Intel GPUs use the iHD driver, which can be installed with:
 
 ```nix
-hardware.opengl.extraPackages = [
-  intel-media-driver
-];
+{
+  hardware.opengl.extraPackages = [
+    intel-media-driver
+  ];
+}
 ```
 
 Older Intel GPUs use the i965 driver, which can be installed with:
 
 ```nix
-hardware.opengl.extraPackages = [
-  intel-vaapi-driver
-];
+{
+  hardware.opengl.extraPackages = [
+    intel-vaapi-driver
+  ];
+}
 ```
 
 ## Common issues {#sec-gpu-accel-common-issues}
diff --git a/nixpkgs/nixos/doc/manual/configuration/ipv4-config.section.md b/nixpkgs/nixos/doc/manual/configuration/ipv4-config.section.md
index c73024b856d7..0464f5389855 100644
--- a/nixpkgs/nixos/doc/manual/configuration/ipv4-config.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/ipv4-config.section.md
@@ -5,18 +5,22 @@ configure network interfaces. However, you can configure an interface
 manually as follows:
 
 ```nix
-networking.interfaces.eth0.ipv4.addresses = [ {
-  address = "192.168.1.2";
-  prefixLength = 24;
-} ];
+{
+  networking.interfaces.eth0.ipv4.addresses = [ {
+    address = "192.168.1.2";
+    prefixLength = 24;
+  } ];
+}
 ```
 
 Typically you'll also want to set a default gateway and set of name
 servers:
 
 ```nix
-networking.defaultGateway = "192.168.1.1";
-networking.nameservers = [ "8.8.8.8" ];
+{
+  networking.defaultGateway = "192.168.1.1";
+  networking.nameservers = [ "8.8.8.8" ];
+}
 ```
 
 ::: {.note}
@@ -28,7 +32,9 @@ configuration is performed by `network-setup.service`.
 The host name is set using [](#opt-networking.hostName):
 
 ```nix
-networking.hostName = "cartman";
+{
+  networking.hostName = "cartman";
+}
 ```
 
 The default host name is `nixos`. Set it to the empty string (`""`) to
diff --git a/nixpkgs/nixos/doc/manual/configuration/ipv6-config.section.md b/nixpkgs/nixos/doc/manual/configuration/ipv6-config.section.md
index ce66f53ed472..b4fe0d759b8a 100644
--- a/nixpkgs/nixos/doc/manual/configuration/ipv6-config.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/ipv6-config.section.md
@@ -9,34 +9,42 @@ may be overridden on a per-interface basis by
 IPv6 support globally by setting:
 
 ```nix
-networking.enableIPv6 = false;
+{
+  networking.enableIPv6 = false;
+}
 ```
 
 You can disable IPv6 on a single interface using a normal sysctl (in
 this example, we use interface `eth0`):
 
 ```nix
-boot.kernel.sysctl."net.ipv6.conf.eth0.disable_ipv6" = true;
+{
+  boot.kernel.sysctl."net.ipv6.conf.eth0.disable_ipv6" = true;
+}
 ```
 
 As with IPv4 networking interfaces are automatically configured via
 DHCPv6. You can configure an interface manually:
 
 ```nix
-networking.interfaces.eth0.ipv6.addresses = [ {
-  address = "fe00:aa:bb:cc::2";
-  prefixLength = 64;
-} ];
+{
+  networking.interfaces.eth0.ipv6.addresses = [ {
+    address = "fe00:aa:bb:cc::2";
+    prefixLength = 64;
+  } ];
+}
 ```
 
 For configuring a gateway, optionally with explicitly specified
 interface:
 
 ```nix
-networking.defaultGateway6 = {
-  address = "fe00::1";
-  interface = "enp0s3";
-};
+{
+  networking.defaultGateway6 = {
+    address = "fe00::1";
+    interface = "enp0s3";
+  };
+}
 ```
 
 See [](#sec-ipv4) for similar examples and additional information.
diff --git a/nixpkgs/nixos/doc/manual/configuration/kubernetes.chapter.md b/nixpkgs/nixos/doc/manual/configuration/kubernetes.chapter.md
index f39726090e43..fba40b648752 100644
--- a/nixpkgs/nixos/doc/manual/configuration/kubernetes.chapter.md
+++ b/nixpkgs/nixos/doc/manual/configuration/kubernetes.chapter.md
@@ -7,14 +7,16 @@ There are generally two ways of enabling Kubernetes on NixOS. One way is
 to enable and configure cluster components appropriately by hand:
 
 ```nix
-services.kubernetes = {
-  apiserver.enable = true;
-  controllerManager.enable = true;
-  scheduler.enable = true;
-  addonManager.enable = true;
-  proxy.enable = true;
-  flannel.enable = true;
-};
+{
+  services.kubernetes = {
+    apiserver.enable = true;
+    controllerManager.enable = true;
+    scheduler.enable = true;
+    addonManager.enable = true;
+    proxy.enable = true;
+    flannel.enable = true;
+  };
+}
 ```
 
 Another way is to assign cluster roles ("master" and/or "node") to
@@ -22,20 +24,26 @@ the host. This enables apiserver, controllerManager, scheduler,
 addonManager, kube-proxy and etcd:
 
 ```nix
-services.kubernetes.roles = [ "master" ];
+{
+  services.kubernetes.roles = [ "master" ];
+}
 ```
 
 While this will enable the kubelet and kube-proxy only:
 
 ```nix
-services.kubernetes.roles = [ "node" ];
+{
+  services.kubernetes.roles = [ "node" ];
+}
 ```
 
 Assigning both the master and node roles is usable if you want a single
 node Kubernetes cluster for dev or testing purposes:
 
 ```nix
-services.kubernetes.roles = [ "master" "node" ];
+{
+  services.kubernetes.roles = [ "master" "node" ];
+}
 ```
 
 Note: Assigning either role will also default both
diff --git a/nixpkgs/nixos/doc/manual/configuration/linux-kernel.chapter.md b/nixpkgs/nixos/doc/manual/configuration/linux-kernel.chapter.md
index 31d8d1a7d0cf..3bc97446f452 100644
--- a/nixpkgs/nixos/doc/manual/configuration/linux-kernel.chapter.md
+++ b/nixpkgs/nixos/doc/manual/configuration/linux-kernel.chapter.md
@@ -5,7 +5,9 @@ option `boot.kernelPackages`. For instance, this selects the Linux 3.10
 kernel:
 
 ```nix
-boot.kernelPackages = pkgs.linuxKernel.packages.linux_3_10;
+{
+  boot.kernelPackages = pkgs.linuxKernel.packages.linux_3_10;
+}
 ```
 
 Note that this not only replaces the kernel, but also packages that are
@@ -40,13 +42,15 @@ If you want to change the kernel configuration, you can use the
 instance, to enable support for the kernel debugger KGDB:
 
 ```nix
-nixpkgs.config.packageOverrides = pkgs: pkgs.lib.recursiveUpdate pkgs {
-  linuxKernel.kernels.linux_5_10 = pkgs.linuxKernel.kernels.linux_5_10.override {
-    extraConfig = ''
-      KGDB y
-    '';
+{
+  nixpkgs.config.packageOverrides = pkgs: pkgs.lib.recursiveUpdate pkgs {
+    linuxKernel.kernels.linux_5_10 = pkgs.linuxKernel.kernels.linux_5_10.override {
+      extraConfig = ''
+        KGDB y
+      '';
+    };
   };
-};
+}
 ```
 
 `extraConfig` takes a list of Linux kernel configuration options, one
@@ -59,14 +63,18 @@ by `udev`. You can force a module to be loaded via
 [](#opt-boot.kernelModules), e.g.
 
 ```nix
-boot.kernelModules = [ "fuse" "kvm-intel" "coretemp" ];
+{
+  boot.kernelModules = [ "fuse" "kvm-intel" "coretemp" ];
+}
 ```
 
 If the module is required early during the boot (e.g. to mount the root
 file system), you can use [](#opt-boot.initrd.kernelModules):
 
 ```nix
-boot.initrd.kernelModules = [ "cifs" ];
+{
+  boot.initrd.kernelModules = [ "cifs" ];
+}
 ```
 
 This causes the specified modules and their dependencies to be added to
@@ -76,7 +84,9 @@ Kernel runtime parameters can be set through
 [](#opt-boot.kernel.sysctl), e.g.
 
 ```nix
-boot.kernel.sysctl."net.ipv4.tcp_keepalive_time" = 120;
+{
+  boot.kernel.sysctl."net.ipv4.tcp_keepalive_time" = 120;
+}
 ```
 
 sets the kernel's TCP keepalive time to 120 seconds. To see the
@@ -89,7 +99,9 @@ Please refer to the Nixpkgs manual for the various ways of [building a custom ke
 To use your custom kernel package in your NixOS configuration, set
 
 ```nix
-boot.kernelPackages = pkgs.linuxPackagesFor yourCustomKernel;
+{
+  boot.kernelPackages = pkgs.linuxPackagesFor yourCustomKernel;
+}
 ```
 
 ## Rust {#sec-linux-rust}
@@ -99,15 +111,17 @@ default. For kernel versions 6.7 or newer, experimental Rust support
 can be enabled. In a NixOS configuration, set:
 
 ```nix
-boot.kernelPatches = [
-  {
-    name = "Rust Support";
-    patch = null;
-    features = {
-      rust = true;
-    };
-  }
-];
+{
+  boot.kernelPatches = [
+    {
+      name = "Rust Support";
+      patch = null;
+      features = {
+        rust = true;
+      };
+    }
+  ];
+}
 ```
 
 ## Developing kernel modules {#sec-linux-config-developing-modules}
diff --git a/nixpkgs/nixos/doc/manual/configuration/luks-file-systems.section.md b/nixpkgs/nixos/doc/manual/configuration/luks-file-systems.section.md
index 7615b95aef42..4d2f625073d4 100644
--- a/nixpkgs/nixos/doc/manual/configuration/luks-file-systems.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/luks-file-systems.section.md
@@ -29,15 +29,19 @@ system is automatically mounted at boot time as `/`, add the following
 to `configuration.nix`:
 
 ```nix
-boot.initrd.luks.devices.crypted.device = "/dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d";
-fileSystems."/".device = "/dev/mapper/crypted";
+{
+  boot.initrd.luks.devices.crypted.device = "/dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d";
+  fileSystems."/".device = "/dev/mapper/crypted";
+}
 ```
 
 Should grub be used as bootloader, and `/boot` is located on an
 encrypted partition, it is necessary to add the following grub option:
 
 ```nix
-boot.loader.grub.enableCryptodisk = true;
+{
+  boot.loader.grub.enableCryptodisk = true;
+}
 ```
 
 ## FIDO2 {#sec-luks-file-systems-fido2}
@@ -68,8 +72,10 @@ To ensure that this file system is decrypted using the FIDO2 compatible
 key, add the following to `configuration.nix`:
 
 ```nix
-boot.initrd.luks.fido2Support = true;
-boot.initrd.luks.devices."/dev/sda2".fido2.credential = "f1d00200108b9d6e849a8b388da457688e3dd653b4e53770012d8f28e5d3b269865038c346802f36f3da7278b13ad6a3bb6a1452e24ebeeaa24ba40eef559b1b287d2a2f80b7";
+{
+  boot.initrd.luks.fido2Support = true;
+  boot.initrd.luks.devices."/dev/sda2".fido2.credential = "f1d00200108b9d6e849a8b388da457688e3dd653b4e53770012d8f28e5d3b269865038c346802f36f3da7278b13ad6a3bb6a1452e24ebeeaa24ba40eef559b1b287d2a2f80b7";
+}
 ```
 
 You can also use the FIDO2 passwordless setup, but for security reasons,
@@ -77,7 +83,9 @@ you might want to enable it only when your device is PIN protected, such
 as [Trezor](https://trezor.io/).
 
 ```nix
-boot.initrd.luks.devices."/dev/sda2".fido2.passwordLess = true;
+{
+  boot.initrd.luks.devices."/dev/sda2".fido2.passwordLess = true;
+}
 ```
 
 ### systemd Stage 1 {#sec-luks-file-systems-fido2-systemd}
@@ -88,13 +96,15 @@ unlocking the existing LUKS2 volume `root` using any enrolled FIDO2 compatible
 tokens.
 
 ```nix
-boot.initrd = {
-  luks.devices.root = {
-    crypttabExtraOpts = [ "fido2-device=auto" ];
-    device = "/dev/sda2";
+{
+  boot.initrd = {
+    luks.devices.root = {
+      crypttabExtraOpts = [ "fido2-device=auto" ];
+      device = "/dev/sda2";
+    };
+    systemd.enable = true;
   };
-  systemd.enable = true;
-};
+}
 ```
 
 All tokens that should be used for unlocking the LUKS2-encrypted volume must
diff --git a/nixpkgs/nixos/doc/manual/configuration/modularity.section.md b/nixpkgs/nixos/doc/manual/configuration/modularity.section.md
index f4a566d66973..cb9f543797d2 100644
--- a/nixpkgs/nixos/doc/manual/configuration/modularity.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/modularity.section.md
@@ -16,7 +16,7 @@ including them from `configuration.nix`, e.g.:
 { imports = [ ./vpn.nix ./kde.nix ];
   services.httpd.enable = true;
   environment.systemPackages = [ pkgs.emacs ];
-  ...
+  # ...
 }
 ```
 
@@ -42,7 +42,9 @@ merged last, so for list-type options, it will appear at the end of the
 merged list. If you want it to appear first, you can use `mkBefore`:
 
 ```nix
-boot.kernelModules = mkBefore [ "kvm-intel" ];
+{
+  boot.kernelModules = mkBefore [ "kvm-intel" ];
+}
 ```
 
 This causes the `kvm-intel` kernel module to be loaded before any other
@@ -60,7 +62,9 @@ When that happens, it's possible to force one definition take precedence
 over the others:
 
 ```nix
-services.httpd.adminAddr = pkgs.lib.mkForce "bob@example.org";
+{
+  services.httpd.adminAddr = pkgs.lib.mkForce "bob@example.org";
+}
 ```
 
 When using multiple modules, you may need to access configuration values
diff --git a/nixpkgs/nixos/doc/manual/configuration/network-manager.section.md b/nixpkgs/nixos/doc/manual/configuration/network-manager.section.md
index 4bda21d34a10..8e8dfabbf3cd 100644
--- a/nixpkgs/nixos/doc/manual/configuration/network-manager.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/network-manager.section.md
@@ -4,7 +4,9 @@ To facilitate network configuration, some desktop environments use
 NetworkManager. You can enable NetworkManager by setting:
 
 ```nix
-networking.networkmanager.enable = true;
+{
+  networking.networkmanager.enable = true;
+}
 ```
 
 some desktop managers (e.g., GNOME) enable NetworkManager automatically
@@ -14,7 +16,9 @@ All users that should have permission to change network settings must
 belong to the `networkmanager` group:
 
 ```nix
-users.users.alice.extraGroups = [ "networkmanager" ];
+{
+  users.users.alice.extraGroups = [ "networkmanager" ];
+}
 ```
 
 NetworkManager is controlled using either `nmcli` or `nmtui`
@@ -32,9 +36,11 @@ can be used together if desired. To do this you need to instruct
 NetworkManager to ignore those interfaces like:
 
 ```nix
-networking.networkmanager.unmanaged = [
-   "*" "except:type:wwan" "except:type:gsm"
-];
+{
+  networking.networkmanager.unmanaged = [
+     "*" "except:type:wwan" "except:type:gsm"
+  ];
+}
 ```
 
 Refer to the option description for the exact syntax and references to
diff --git a/nixpkgs/nixos/doc/manual/configuration/overlayfs.section.md b/nixpkgs/nixos/doc/manual/configuration/overlayfs.section.md
index 592fb7c2e6f7..7027a6f426d4 100644
--- a/nixpkgs/nixos/doc/manual/configuration/overlayfs.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/overlayfs.section.md
@@ -4,21 +4,23 @@ NixOS offers a convenient abstraction to create both read-only as well writable
 overlays.
 
 ```nix
-fileSystems = {
-  "/writable-overlay" = {
-    overlay = {
-      lowerdir = [ writableOverlayLowerdir ];
-      upperdir = "/.rw-writable-overlay/upper";
-      workdir = "/.rw-writable-overlay/work";
+{
+  fileSystems = {
+    "/writable-overlay" = {
+      overlay = {
+        lowerdir = [ writableOverlayLowerdir ];
+        upperdir = "/.rw-writable-overlay/upper";
+        workdir = "/.rw-writable-overlay/work";
+      };
+      # Mount the writable overlay in the initrd.
+      neededForBoot = true;
     };
-    # Mount the writable overlay in the initrd.
-    neededForBoot = true;
+    "/readonly-overlay".overlay.lowerdir = [
+      writableOverlayLowerdir
+      writableOverlayLowerdir2
+    ];
   };
-  "/readonly-overlay".overlay.lowerdir = [
-    writableOverlayLowerdir
-    writableOverlayLowerdir2
-  ];
-};
+}
 ```
 
 If `upperdir` and `workdir` are not null, they will be created before the
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles.chapter.md b/nixpkgs/nixos/doc/manual/configuration/profiles.chapter.md
index 9f6c11b0d59d..6161d48e353f 100644
--- a/nixpkgs/nixos/doc/manual/configuration/profiles.chapter.md
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles.chapter.md
@@ -8,9 +8,11 @@ is to say, expected usage is to add them to the imports list of your
 `/etc/configuration.nix` as such:
 
 ```nix
-imports = [
-  <nixpkgs/nixos/modules/profiles/profile-name.nix>
-];
+{
+  imports = [
+    <nixpkgs/nixos/modules/profiles/profile-name.nix>
+  ];
+}
 ```
 
 Even if some of these profiles seem only useful in the context of
diff --git a/nixpkgs/nixos/doc/manual/configuration/renaming-interfaces.section.md b/nixpkgs/nixos/doc/manual/configuration/renaming-interfaces.section.md
index 5b515e9f82a0..4804e35f8a24 100644
--- a/nixpkgs/nixos/doc/manual/configuration/renaming-interfaces.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/renaming-interfaces.section.md
@@ -25,10 +25,12 @@ we assign the name `wan` to the interface with MAC address
 `52:54:00:12:01:01` using a netword link unit:
 
 ```nix
-systemd.network.links."10-wan" = {
-  matchConfig.PermanentMACAddress = "52:54:00:12:01:01";
-  linkConfig.Name = "wan";
-};
+{
+  systemd.network.links."10-wan" = {
+    matchConfig.PermanentMACAddress = "52:54:00:12:01:01";
+    linkConfig.Name = "wan";
+  };
+}
 ```
 
 Note that links are directly read by udev, *not networkd*, and will work
@@ -37,10 +39,12 @@ even if networkd is disabled.
 Alternatively, we can use a plain old udev rule:
 
 ```nix
-boot.initrd.services.udev.rules = ''
-  SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", \
-  ATTR{address}=="52:54:00:12:01:01", KERNEL=="eth*", NAME="wan"
-'';
+{
+  boot.initrd.services.udev.rules = ''
+    SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", \
+    ATTR{address}=="52:54:00:12:01:01", KERNEL=="eth*", NAME="wan"
+  '';
+}
 ```
 
 ::: {.warning}
diff --git a/nixpkgs/nixos/doc/manual/configuration/ssh.section.md b/nixpkgs/nixos/doc/manual/configuration/ssh.section.md
index 9e239a848178..8754e3d9ccaf 100644
--- a/nixpkgs/nixos/doc/manual/configuration/ssh.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/ssh.section.md
@@ -3,7 +3,9 @@
 Secure shell (SSH) access to your machine can be enabled by setting:
 
 ```nix
-services.openssh.enable = true;
+{
+  services.openssh.enable = true;
+}
 ```
 
 By default, root logins using a password are disallowed. They can be
@@ -14,6 +16,8 @@ You can declaratively specify authorised RSA/DSA public keys for a user
 as follows:
 
 ```nix
-users.users.alice.openssh.authorizedKeys.keys =
-  [ "ssh-dss AAAAB3NzaC1kc3MAAACBAPIkGWVEt4..." ];
+{
+  users.users.alice.openssh.authorizedKeys.keys =
+    [ "ssh-dss AAAAB3NzaC1kc3MAAACBAPIkGWVEt4..." ];
+}
 ```
diff --git a/nixpkgs/nixos/doc/manual/configuration/subversion.chapter.md b/nixpkgs/nixos/doc/manual/configuration/subversion.chapter.md
index ff870f5c40b9..2436138669fe 100644
--- a/nixpkgs/nixos/doc/manual/configuration/subversion.chapter.md
+++ b/nixpkgs/nixos/doc/manual/configuration/subversion.chapter.md
@@ -21,9 +21,11 @@ Apache HTTP, setting [](#opt-services.httpd.adminAddr)
 appropriately:
 
 ```nix
-services.httpd.enable = true;
-services.httpd.adminAddr = ...;
-networking.firewall.allowedTCPPorts = [ 80 443 ];
+{
+  services.httpd.enable = true;
+  services.httpd.adminAddr = "...";
+  networking.firewall.allowedTCPPorts = [ 80 443 ];
+}
 ```
 
 For a simple Subversion server with basic authentication, configure the
@@ -34,25 +36,28 @@ the `.authz` file describing access permission, and `AuthUserFile` to
 the password file.
 
 ```nix
-services.httpd.extraModules = [
-    # note that order is *super* important here
-    { name = "dav_svn"; path = "${pkgs.apacheHttpdPackages.subversion}/modules/mod_dav_svn.so"; }
-    { name = "authz_svn"; path = "${pkgs.apacheHttpdPackages.subversion}/modules/mod_authz_svn.so"; }
-  ];
-  services.httpd.virtualHosts = {
-    "svn" = {
-       hostName = HOSTNAME;
-       documentRoot = DOCUMENTROOT;
-       locations."/svn".extraConfig = ''
-           DAV svn
-           SVNParentPath REPO_PARENT
-           AuthzSVNAccessFile ACCESS_FILE
-           AuthName "SVN Repositories"
-           AuthType Basic
-           AuthUserFile PASSWORD_FILE
-           Require valid-user
-      '';
-    }
+{
+  services.httpd.extraModules = [
+      # note that order is *super* important here
+      { name = "dav_svn"; path = "${pkgs.apacheHttpdPackages.subversion}/modules/mod_dav_svn.so"; }
+      { name = "authz_svn"; path = "${pkgs.apacheHttpdPackages.subversion}/modules/mod_authz_svn.so"; }
+    ];
+    services.httpd.virtualHosts = {
+      "svn" = {
+         hostName = HOSTNAME;
+         documentRoot = DOCUMENTROOT;
+         locations."/svn".extraConfig = ''
+             DAV svn
+             SVNParentPath REPO_PARENT
+             AuthzSVNAccessFile ACCESS_FILE
+             AuthName "SVN Repositories"
+             AuthType Basic
+             AuthUserFile PASSWORD_FILE
+             Require valid-user
+        '';
+      };
+    };
+}
 ```
 
 The key `"svn"` is just a symbolic name identifying the virtual host.
@@ -90,7 +95,7 @@ $ htpasswd -s PASSWORD_FILE USER_NAME
 The file describing access permissions `ACCESS_FILE` will look something
 like the following:
 
-```nix
+```
 [/]
 * = r
 
diff --git a/nixpkgs/nixos/doc/manual/configuration/user-mgmt.chapter.md b/nixpkgs/nixos/doc/manual/configuration/user-mgmt.chapter.md
index 71d61ce4c641..7d83121d41e0 100644
--- a/nixpkgs/nixos/doc/manual/configuration/user-mgmt.chapter.md
+++ b/nixpkgs/nixos/doc/manual/configuration/user-mgmt.chapter.md
@@ -6,13 +6,15 @@ management. In the declarative style, users are specified in
 account named `alice` shall exist:
 
 ```nix
-users.users.alice = {
-  isNormalUser = true;
-  home = "/home/alice";
-  description = "Alice Foobar";
-  extraGroups = [ "wheel" "networkmanager" ];
-  openssh.authorizedKeys.keys = [ "ssh-dss AAAAB3Nza... alice@foobar" ];
-};
+{
+  users.users.alice = {
+    isNormalUser = true;
+    home = "/home/alice";
+    description = "Alice Foobar";
+    extraGroups = [ "wheel" "networkmanager" ];
+    openssh.authorizedKeys.keys = [ "ssh-dss AAAAB3Nza... alice@foobar" ];
+  };
+}
 ```
 
 Note that `alice` is a member of the `wheel` and `networkmanager`
@@ -38,7 +40,9 @@ A user ID (uid) is assigned automatically. You can also specify a uid
 manually by adding
 
 ```nix
-uid = 1000;
+{
+  uid = 1000;
+}
 ```
 
 to the user specification.
@@ -47,7 +51,9 @@ Groups can be specified similarly. The following states that a group
 named `students` shall exist:
 
 ```nix
-users.groups.students.gid = 1000;
+{
+  users.groups.students.gid = 1000;
+}
 ```
 
 As with users, the group ID (gid) is optional and will be assigned
@@ -100,7 +106,9 @@ Instead of using a custom perl script to create users and groups, you can use
 systemd-sysusers:
 
 ```nix
-systemd.sysusers.enable = true;
+{
+  systemd.sysusers.enable = true;
+}
 ```
 
 The primary benefit of this is to remove a dependency on perl.
diff --git a/nixpkgs/nixos/doc/manual/configuration/wayland.chapter.md b/nixpkgs/nixos/doc/manual/configuration/wayland.chapter.md
index 0f195bd66567..27c027d38514 100644
--- a/nixpkgs/nixos/doc/manual/configuration/wayland.chapter.md
+++ b/nixpkgs/nixos/doc/manual/configuration/wayland.chapter.md
@@ -9,7 +9,9 @@ a Wayland Compositor such as sway without separately enabling a Wayland
 server:
 
 ```nix
+{
 programs.sway.enable = true;
+}
 ```
 
 This installs the sway compositor along with some essential utilities.
@@ -19,7 +21,9 @@ If you are using a wlroots-based compositor, like sway, and want to be
 able to share your screen, you might want to activate this option:
 
 ```nix
-xdg.portal.wlr.enable = true;
+{
+  xdg.portal.wlr.enable = true;
+}
 ```
 
 and configure Pipewire using
diff --git a/nixpkgs/nixos/doc/manual/configuration/wireless.section.md b/nixpkgs/nixos/doc/manual/configuration/wireless.section.md
index 3299d2d7ecb8..df828698cf03 100644
--- a/nixpkgs/nixos/doc/manual/configuration/wireless.section.md
+++ b/nixpkgs/nixos/doc/manual/configuration/wireless.section.md
@@ -7,25 +7,29 @@ skip the rest of this section on wireless networks.
 NixOS will start wpa_supplicant for you if you enable this setting:
 
 ```nix
-networking.wireless.enable = true;
+{
+  networking.wireless.enable = true;
+}
 ```
 
 NixOS lets you specify networks for wpa_supplicant declaratively:
 
 ```nix
-networking.wireless.networks = {
-  echelon = {                # SSID with no spaces or special characters
-    psk = "abcdefgh";
-  };
-  "echelon's AP" = {         # SSID with spaces and/or special characters
-    psk = "ijklmnop";
-  };
-  echelon = {                # Hidden SSID
-    hidden = true;
-    psk = "qrstuvwx";
+{
+  networking.wireless.networks = {
+    echelon = {                # SSID with no spaces or special characters
+      psk = "abcdefgh";
+    };
+    "echelon's AP" = {         # SSID with spaces and/or special characters
+      psk = "ijklmnop";
+    };
+    echelon = {                # Hidden SSID
+      hidden = true;
+      psk = "qrstuvwx";
+    };
+    free.wifi = {};            # Public wireless network
   };
-  free.wifi = {};            # Public wireless network
-};
+}
 ```
 
 Be aware that keys will be written to the nix store in plaintext! When
@@ -46,11 +50,13 @@ network={
 ```
 
 ```nix
-networking.wireless.networks = {
-  echelon = {
-    pskRaw = "dca6d6ed41f4ab5a984c9f55f6f66d4efdc720ebf66959810f4329bb391c5435";
+{
+  networking.wireless.networks = {
+    echelon = {
+      pskRaw = "dca6d6ed41f4ab5a984c9f55f6f66d4efdc720ebf66959810f4329bb391c5435";
+    };
   };
-};
+}
 ```
 
 or you can use it to directly generate the `wpa_supplicant.conf`:
diff --git a/nixpkgs/nixos/doc/manual/configuration/x-windows.chapter.md b/nixpkgs/nixos/doc/manual/configuration/x-windows.chapter.md
index bf1872ae01ac..8162e38e9f5b 100644
--- a/nixpkgs/nixos/doc/manual/configuration/x-windows.chapter.md
+++ b/nixpkgs/nixos/doc/manual/configuration/x-windows.chapter.md
@@ -4,7 +4,9 @@ The X Window System (X11) provides the basis of NixOS' graphical user
 interface. It can be enabled as follows:
 
 ```nix
-services.xserver.enable = true;
+{
+  services.xserver.enable = true;
+}
 ```
 
 The X server will automatically detect and use the appropriate video
@@ -12,7 +14,9 @@ driver from a set of X.org drivers (such as `vesa` and `intel`). You can
 also specify a driver manually, e.g.
 
 ```nix
-services.xserver.videoDrivers = [ "r128" ];
+{
+  services.xserver.videoDrivers = [ "r128" ];
+}
 ```
 
 to enable X.org's `xf86-video-r128` driver.
@@ -22,15 +26,17 @@ Otherwise, you can only log into a plain undecorated `xterm` window.
 Thus you should pick one or more of the following lines:
 
 ```nix
-services.xserver.desktopManager.plasma5.enable = true;
-services.xserver.desktopManager.xfce.enable = true;
-services.xserver.desktopManager.gnome.enable = true;
-services.xserver.desktopManager.mate.enable = true;
-services.xserver.windowManager.xmonad.enable = true;
-services.xserver.windowManager.twm.enable = true;
-services.xserver.windowManager.icewm.enable = true;
-services.xserver.windowManager.i3.enable = true;
-services.xserver.windowManager.herbstluftwm.enable = true;
+{
+  services.xserver.desktopManager.plasma5.enable = true;
+  services.xserver.desktopManager.xfce.enable = true;
+  services.xserver.desktopManager.gnome.enable = true;
+  services.xserver.desktopManager.mate.enable = true;
+  services.xserver.windowManager.xmonad.enable = true;
+  services.xserver.windowManager.twm.enable = true;
+  services.xserver.windowManager.icewm.enable = true;
+  services.xserver.windowManager.i3.enable = true;
+  services.xserver.windowManager.herbstluftwm.enable = true;
+}
 ```
 
 NixOS's default *display manager* (the program that provides a graphical
@@ -38,22 +44,28 @@ login prompt and manages the X server) is LightDM. You can select an
 alternative one by picking one of the following lines:
 
 ```nix
-services.xserver.displayManager.sddm.enable = true;
-services.xserver.displayManager.gdm.enable = true;
+{
+  services.xserver.displayManager.sddm.enable = true;
+  services.xserver.displayManager.gdm.enable = true;
+}
 ```
 
 You can set the keyboard layout (and optionally the layout variant):
 
 ```nix
-services.xserver.xkb.layout = "de";
-services.xserver.xkb.variant = "neo";
+{
+  services.xserver.xkb.layout = "de";
+  services.xserver.xkb.variant = "neo";
+}
 ```
 
 The X server is started automatically at boot time. If you don't want
 this to happen, you can set:
 
 ```nix
-services.xserver.autorun = false;
+{
+  services.xserver.autorun = false;
+}
 ```
 
 The X server can then be started manually:
@@ -66,7 +78,9 @@ On 64-bit systems, if you want OpenGL for 32-bit programs such as in
 Wine, you should also set the following:
 
 ```nix
-hardware.opengl.driSupport32Bit = true;
+{
+  hardware.opengl.driSupport32Bit = true;
+}
 ```
 
 ## Auto-login {#sec-x11-auto-login}
@@ -84,16 +98,20 @@ desktop environment. If you wanted no desktop environment and i3 as your
 window manager, you'd define:
 
 ```nix
-services.xserver.displayManager.defaultSession = "none+i3";
+{
+  services.xserver.displayManager.defaultSession = "none+i3";
+}
 ```
 
 Every display manager in NixOS supports auto-login, here is an example
 using lightdm for a user `alice`:
 
 ```nix
-services.xserver.displayManager.lightdm.enable = true;
-services.xserver.displayManager.autoLogin.enable = true;
-services.xserver.displayManager.autoLogin.user = "alice";
+{
+  services.xserver.displayManager.lightdm.enable = true;
+  services.xserver.displayManager.autoLogin.enable = true;
+  services.xserver.displayManager.autoLogin.user = "alice";
+}
 ```
 
 ## Intel Graphics drivers {#sec-x11--graphics-cards-intel}
@@ -119,18 +137,22 @@ drivers. Use the option
 to set one. The recommended configuration for modern systems is:
 
 ```nix
-services.xserver.videoDrivers = [ "modesetting" ];
+{
+  services.xserver.videoDrivers = [ "modesetting" ];
+}
 ```
 
 If you experience screen tearing no matter what, this configuration was
 reported to resolve the issue:
 
 ```nix
-services.xserver.videoDrivers = [ "intel" ];
-services.xserver.deviceSection = ''
-  Option "DRI" "2"
-  Option "TearFree" "true"
-'';
+{
+  services.xserver.videoDrivers = [ "intel" ];
+  services.xserver.deviceSection = ''
+    Option "DRI" "2"
+    Option "TearFree" "true"
+  '';
+}
 ```
 
 Note that this will likely downgrade the performance compared to
@@ -143,17 +165,19 @@ better 3D performance than the X.org drivers. It is not enabled by
 default because it's not free software. You can enable it as follows:
 
 ```nix
-services.xserver.videoDrivers = [ "nvidia" ];
+{
+  services.xserver.videoDrivers = [ "nvidia" ];
+}
 ```
 
-Or if you have an older card, you may have to use one of the legacy
-drivers:
+If you have an older card, you may have to use one of the legacy drivers:
 
 ```nix
-services.xserver.videoDrivers = [ "nvidiaLegacy470" ];
-services.xserver.videoDrivers = [ "nvidiaLegacy390" ];
-services.xserver.videoDrivers = [ "nvidiaLegacy340" ];
-services.xserver.videoDrivers = [ "nvidiaLegacy304" ];
+{
+  hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_470;
+  # or, for even older cards, one of:
+  # hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_390;
+  # hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_340;
+}
 ```
 
 You may need to reboot after enabling this driver to prevent a clash
@@ -168,7 +192,9 @@ performance. If you still want to use it anyway, you need to explicitly
 set:
 
 ```nix
-services.xserver.videoDrivers = [ "amdgpu-pro" ];
+{
+  services.xserver.videoDrivers = [ "amdgpu-pro" ];
+}
 ```
 
 You will need to reboot after enabling this driver to prevent a clash
@@ -180,14 +206,18 @@ Support for Synaptics touchpads (found in many laptops such as the Dell
 Latitude series) can be enabled as follows:
 
 ```nix
-services.xserver.libinput.enable = true;
+{
+  services.xserver.libinput.enable = true;
+}
 ```
 
 The driver has many options (see [](#ch-options)).
 For instance, the following disables tap-to-click behavior:
 
 ```nix
-services.xserver.libinput.touchpad.tapping = false;
+{
+  services.xserver.libinput.touchpad.tapping = false;
+}
 ```
 
 Note: the use of `services.xserver.synaptics` is deprecated since NixOS
@@ -200,9 +230,11 @@ GTK themes can be installed either to user profile or system-wide (via
 GTK ones, you can use the following configuration:
 
 ```nix
-qt.enable = true;
-qt.platformTheme = "gtk2";
-qt.style = "gtk2";
+{
+  qt.enable = true;
+  qt.platformTheme = "gtk2";
+  qt.style = "gtk2";
+}
 ```
 
 ## Custom XKB layouts {#custom-xkb-layouts}
@@ -219,7 +251,7 @@ Create a file called `us-greek` with the following content (under a
 directory called `symbols`; it's an XKB peculiarity that will help with
 testing):
 
-```nix
+```
 xkb_symbols "us-greek"
 {
   include "us(basic)"            // includes the base US keys
@@ -236,11 +268,13 @@ xkb_symbols "us-greek"
 A minimal layout specification must include the following:
 
 ```nix
-services.xserver.xkb.extraLayouts.us-greek = {
-  description = "US layout with alt-gr greek";
-  languages   = [ "eng" ];
-  symbolsFile = /yourpath/symbols/us-greek;
-};
+{
+  services.xserver.xkb.extraLayouts.us-greek = {
+    description = "US layout with alt-gr greek";
+    languages   = [ "eng" ];
+    symbolsFile = /yourpath/symbols/us-greek;
+  };
+}
 ```
 
 ::: {.note}
@@ -277,7 +311,7 @@ Use the *xev* utility from `pkgs.xorg.xev` to find the codes of the keys
 of interest, then create a `media-key` file to hold the keycodes
 definitions
 
-```nix
+```
 xkb_keycodes "media"
 {
  <volUp>   = 123;
@@ -287,7 +321,7 @@ xkb_keycodes "media"
 
 Now use the newly defined keycodes in `media-sym`:
 
-```nix
+```
 xkb_symbols "media"
 {
  key.type = "ONE_LEVEL";
@@ -299,12 +333,14 @@ xkb_symbols "media"
 As before, to install the layout do
 
 ```nix
-services.xserver.xkb.extraLayouts.media = {
-  description  = "Multimedia keys remapping";
-  languages    = [ "eng" ];
-  symbolsFile  = /path/to/media-key;
-  keycodesFile = /path/to/media-sym;
-};
+{
+  services.xserver.xkb.extraLayouts.media = {
+    description  = "Multimedia keys remapping";
+    languages    = [ "eng" ];
+    symbolsFile  = /path/to/media-sym;
+    keycodesFile = /path/to/media-key;
+  };
+}
 ```
 
 ::: {.note}
@@ -320,7 +356,9 @@ workaround, you can set the keymap using `setxkbmap` at the start of the
 session with:
 
 ```nix
-services.xserver.displayManager.sessionCommands = "setxkbmap -keycodes media";
+{
+  services.xserver.displayManager.sessionCommands = "setxkbmap -keycodes media";
+}
 ```
 
 If you are manually starting the X server, you should set the argument
diff --git a/nixpkgs/nixos/doc/manual/configuration/xfce.chapter.md b/nixpkgs/nixos/doc/manual/configuration/xfce.chapter.md
index 9ec4a51d6e35..fcc9bcc45641 100644
--- a/nixpkgs/nixos/doc/manual/configuration/xfce.chapter.md
+++ b/nixpkgs/nixos/doc/manual/configuration/xfce.chapter.md
@@ -3,21 +3,25 @@
 To enable the Xfce Desktop Environment, set
 
 ```nix
-services.xserver.desktopManager.xfce.enable = true;
-services.xserver.displayManager.defaultSession = "xfce";
+{
+  services.xserver.desktopManager.xfce.enable = true;
+  services.xserver.displayManager.defaultSession = "xfce";
+}
 ```
 
 Optionally, *picom* can be enabled for nice graphical effects; some
 example settings:
 
 ```nix
-services.picom = {
-  enable = true;
-  fade = true;
-  inactiveOpacity = 0.9;
-  shadow = true;
-  fadeDelta = 4;
-};
+{
+  services.picom = {
+    enable = true;
+    fade = true;
+    inactiveOpacity = 0.9;
+    shadow = true;
+    fadeDelta = 4;
+  };
+}
 ```
 
 Some Xfce programs are not installed automatically. To install them
diff --git a/nixpkgs/nixos/doc/manual/development/activation-script.section.md b/nixpkgs/nixos/doc/manual/development/activation-script.section.md
index cc317a6a01aa..f771c3524b79 100644
--- a/nixpkgs/nixos/doc/manual/development/activation-script.section.md
+++ b/nixpkgs/nixos/doc/manual/development/activation-script.section.md
@@ -17,13 +17,15 @@ activation script will take these dependencies into account and order the
 snippets accordingly. As a simple example:
 
 ```nix
-system.activationScripts.my-activation-script = {
-  deps = [ "etc" ];
-  # supportsDryActivation = true;
-  text = ''
-    echo "Hallo i bims"
-  '';
-};
+{
+  system.activationScripts.my-activation-script = {
+    deps = [ "etc" ];
+    # supportsDryActivation = true;
+    text = ''
+      echo "Hallo i bims"
+    '';
+  };
+}
 ```
 
 This example creates an activation script snippet that is run after the `etc`
diff --git a/nixpkgs/nixos/doc/manual/development/assertions.section.md b/nixpkgs/nixos/doc/manual/development/assertions.section.md
index cc6d81e56990..eb5158c90f98 100644
--- a/nixpkgs/nixos/doc/manual/development/assertions.section.md
+++ b/nixpkgs/nixos/doc/manual/development/assertions.section.md
@@ -18,7 +18,7 @@ This is an example of using `warnings`.
                This is known to cause some specific problems in certain situations.
                '' ]
       else [];
-  }
+  };
 }
 ```
 
@@ -35,6 +35,6 @@ This example, extracted from the [`syslogd` module](https://github.com/NixOS/nix
           message = "rsyslogd conflicts with syslogd";
         }
       ];
-  }
+  };
 }
 ```
diff --git a/nixpkgs/nixos/doc/manual/development/etc-overlay.section.md b/nixpkgs/nixos/doc/manual/development/etc-overlay.section.md
index e6f6d8d4ca1e..d8588f508a26 100644
--- a/nixpkgs/nixos/doc/manual/development/etc-overlay.section.md
+++ b/nixpkgs/nixos/doc/manual/development/etc-overlay.section.md
@@ -9,7 +9,9 @@ Instead of using a custom perl script to activate `/etc`, you activate it via an
 overlay filesystem:
 
 ```nix
-system.etc.overlay.enable = true;
+{
+  system.etc.overlay.enable = true;
+}
 ```
 
 Using an overlay has two benefits:
@@ -22,7 +24,9 @@ upper layer). However, you can also mount `/etc` immutably (i.e. read-only) by
 setting:
 
 ```nix
-system.etc.overlay.mutable = false;
+{
+  system.etc.overlay.mutable = false;
+}
 ```
 
 The overlay is atomically replaced during system switch. However, files that
diff --git a/nixpkgs/nixos/doc/manual/development/meta-attributes.section.md b/nixpkgs/nixos/doc/manual/development/meta-attributes.section.md
index 33b41fe74d29..b2ad23e58b94 100644
--- a/nixpkgs/nixos/doc/manual/development/meta-attributes.section.md
+++ b/nixpkgs/nixos/doc/manual/development/meta-attributes.section.md
@@ -14,11 +14,11 @@ file.
 { config, lib, pkgs, ... }:
 {
   options = {
-    ...
+    # ...
   };
 
   config = {
-    ...
+    # ...
   };
 
   meta = {
diff --git a/nixpkgs/nixos/doc/manual/development/non-switchable-systems.section.md b/nixpkgs/nixos/doc/manual/development/non-switchable-systems.section.md
index 87bb46c78909..a51e8233f30b 100644
--- a/nixpkgs/nixos/doc/manual/development/non-switchable-systems.section.md
+++ b/nixpkgs/nixos/doc/manual/development/non-switchable-systems.section.md
@@ -9,7 +9,7 @@ profile:
 
 ```nix
 { modulesPath, ... }: {
-  imports = [ "${modulesPath}/profiles/image-based-appliance.nix" ]
+  imports = [ "${modulesPath}/profiles/image-based-appliance.nix" ];
 }
 ```
 
diff --git a/nixpkgs/nixos/doc/manual/development/option-declarations.section.md b/nixpkgs/nixos/doc/manual/development/option-declarations.section.md
index 762070416187..ad5857b11a2e 100644
--- a/nixpkgs/nixos/doc/manual/development/option-declarations.section.md
+++ b/nixpkgs/nixos/doc/manual/development/option-declarations.section.md
@@ -6,14 +6,16 @@ hasn't been declared in any module. An option declaration generally
 looks like this:
 
 ```nix
-options = {
-  name = mkOption {
-    type = type specification;
-    default = default value;
-    example = example value;
-    description = lib.mdDoc "Description for use in the NixOS manual.";
+{
+  options = {
+    name = mkOption {
+      type = type specification;
+      default = default value;
+      example = example value;
+      description = lib.mdDoc "Description for use in the NixOS manual.";
+    };
   };
-};
+}
 ```
 
 The attribute names within the `name` attribute path must be camel
@@ -221,28 +223,34 @@ enforces that there can only be a single display manager enabled.
 ::: {#ex-option-declaration-eot-service .example}
 ### Extensible type placeholder in the service module
 ```nix
-services.xserver.displayManager.enable = mkOption {
-  description = "Display manager to use";
-  type = with types; nullOr (enum [ ]);
-};
+{
+  services.xserver.displayManager.enable = mkOption {
+    description = "Display manager to use";
+    type = with types; nullOr (enum [ ]);
+  };
+}
 ```
 :::
 
 ::: {#ex-option-declaration-eot-backend-gdm .example}
 ### Extending `services.xserver.displayManager.enable` in the `gdm` module
 ```nix
-services.xserver.displayManager.enable = mkOption {
-  type = with types; nullOr (enum [ "gdm" ]);
-};
+{
+  services.xserver.displayManager.enable = mkOption {
+    type = with types; nullOr (enum [ "gdm" ]);
+  };
+}
 ```
 :::
 
 ::: {#ex-option-declaration-eot-backend-sddm .example}
 ### Extending `services.xserver.displayManager.enable` in the `sddm` module
 ```nix
-services.xserver.displayManager.enable = mkOption {
-  type = with types; nullOr (enum [ "sddm" ]);
-};
+{
+  services.xserver.displayManager.enable = mkOption {
+    type = with types; nullOr (enum [ "sddm" ]);
+  };
+}
 ```
 :::
 
diff --git a/nixpkgs/nixos/doc/manual/development/option-def.section.md b/nixpkgs/nixos/doc/manual/development/option-def.section.md
index 6a3dc26b99be..227f41d812ff 100644
--- a/nixpkgs/nixos/doc/manual/development/option-def.section.md
+++ b/nixpkgs/nixos/doc/manual/development/option-def.section.md
@@ -4,9 +4,11 @@ Option definitions are generally straight-forward bindings of values to
 option names, like
 
 ```nix
-config = {
-  services.httpd.enable = true;
-};
+{
+  config = {
+    services.httpd.enable = true;
+  };
+}
 ```
 
 However, sometimes you need to wrap an option definition or set of
@@ -18,10 +20,12 @@ If a set of option definitions is conditional on the value of another
 option, you may need to use `mkIf`. Consider, for instance:
 
 ```nix
-config = if config.services.httpd.enable then {
-  environment.systemPackages = [ ... ];
-  ...
-} else {};
+{
+  config = if config.services.httpd.enable then {
+    environment.systemPackages = [ /* ... */ ];
+    # ...
+  } else {};
+}
 ```
 
 This definition will cause Nix to fail with an "infinite recursion"
@@ -30,30 +34,36 @@ on the value being constructed here. After all, you could also write the
 clearly circular and contradictory:
 
 ```nix
-config = if config.services.httpd.enable then {
-  services.httpd.enable = false;
-} else {
-  services.httpd.enable = true;
-};
+{
+  config = if config.services.httpd.enable then {
+    services.httpd.enable = false;
+  } else {
+    services.httpd.enable = true;
+  };
+}
 ```
 
 The solution is to write:
 
 ```nix
-config = mkIf config.services.httpd.enable {
-  environment.systemPackages = [ ... ];
-  ...
-};
+{
+  config = mkIf config.services.httpd.enable {
+    environment.systemPackages = [ /* ... */ ];
+    # ...
+  };
+}
 ```
 
 The special function `mkIf` causes the evaluation of the conditional to
 be "pushed down" into the individual definitions, as if you had written:
 
 ```nix
-config = {
-  environment.systemPackages = if config.services.httpd.enable then [ ... ] else [];
-  ...
-};
+{
+  config = {
+    environment.systemPackages = if config.services.httpd.enable then [ /* ... */ ] else [];
+    # ...
+  };
+}
 ```
 
 ## Setting Priorities {#sec-option-definitions-setting-priorities}
@@ -65,7 +75,9 @@ priority 100 and option defaults have priority 1500.
 You can specify an explicit priority by using `mkOverride`, e.g.
 
 ```nix
-services.openssh.enable = mkOverride 10 false;
+{
+  services.openssh.enable = mkOverride 10 false;
+}
 ```
 
 This definition causes all other definitions with priorities above 10 to
@@ -80,7 +92,9 @@ The functions `mkBefore` and `mkAfter` are equal to `mkOrder 500` and `mkOrder 1
 As an example,
 
 ```nix
-hardware.firmware = mkBefore [ myFirmware ];
+{
+  hardware.firmware = mkBefore [ myFirmware ];
+}
 ```
 
 This definition ensures that `myFirmware` comes before other unordered
@@ -97,13 +111,15 @@ they were declared in separate modules. This can be done using
 `mkMerge`:
 
 ```nix
-config = mkMerge
-  [ # Unconditional stuff.
-    { environment.systemPackages = [ ... ];
-    }
-    # Conditional stuff.
-    (mkIf config.services.bla.enable {
-      environment.systemPackages = [ ... ];
-    })
-  ];
+{
+  config = mkMerge
+    [ # Unconditional stuff.
+      { environment.systemPackages = [ /* ... */ ];
+      }
+      # Conditional stuff.
+      (mkIf config.services.bla.enable {
+        environment.systemPackages = [ /* ... */ ];
+      })
+    ];
+}
 ```
diff --git a/nixpkgs/nixos/doc/manual/development/option-types.section.md b/nixpkgs/nixos/doc/manual/development/option-types.section.md
index 04edf99e70b0..243039b01673 100644
--- a/nixpkgs/nixos/doc/manual/development/option-types.section.md
+++ b/nixpkgs/nixos/doc/manual/development/option-types.section.md
@@ -374,19 +374,21 @@ if you want to allow users to leave it undefined.
 ::: {#ex-submodule-direct .example}
 ### Directly defined submodule
 ```nix
-options.mod = mkOption {
-  description = "submodule example";
-  type = with types; submodule {
-    options = {
-      foo = mkOption {
-        type = int;
-      };
-      bar = mkOption {
-        type = str;
+{
+  options.mod = mkOption {
+    description = "submodule example";
+    type = with types; submodule {
+      options = {
+        foo = mkOption {
+          type = int;
+        };
+        bar = mkOption {
+          type = str;
+        };
       };
     };
   };
-};
+}
 ```
 :::
 
@@ -405,10 +407,12 @@ let
     };
   };
 in
-options.mod = mkOption {
-  description = "submodule example";
-  type = with types; submodule modOptions;
-};
+{
+  options.mod = mkOption {
+    description = "submodule example";
+    type = with types; submodule modOptions;
+  };
+}
 ```
 :::
 
@@ -421,29 +425,33 @@ multiple definitions of the submodule option set
 ::: {#ex-submodule-listof-declaration .example}
 ### Declaration of a list of submodules
 ```nix
-options.mod = mkOption {
-  description = "submodule example";
-  type = with types; listOf (submodule {
-    options = {
-      foo = mkOption {
-        type = int;
-      };
-      bar = mkOption {
-        type = str;
+{
+  options.mod = mkOption {
+    description = "submodule example";
+    type = with types; listOf (submodule {
+      options = {
+        foo = mkOption {
+          type = int;
+        };
+        bar = mkOption {
+          type = str;
+        };
       };
-    };
-  });
-};
+    });
+  };
+}
 ```
 :::
 
 ::: {#ex-submodule-listof-definition .example}
 ### Definition of a list of submodules
 ```nix
-config.mod = [
-  { foo = 1; bar = "one"; }
-  { foo = 2; bar = "two"; }
-];
+{
+  config.mod = [
+    { foo = 1; bar = "one"; }
+    { foo = 2; bar = "two"; }
+  ];
+}
 ```
 :::
 
@@ -455,27 +463,31 @@ multiple named definitions of the submodule option set
 ::: {#ex-submodule-attrsof-declaration .example}
 ### Declaration of attribute sets of submodules
 ```nix
-options.mod = mkOption {
-  description = "submodule example";
-  type = with types; attrsOf (submodule {
-    options = {
-      foo = mkOption {
-        type = int;
-      };
-      bar = mkOption {
-        type = str;
+{
+  options.mod = mkOption {
+    description = "submodule example";
+    type = with types; attrsOf (submodule {
+      options = {
+        foo = mkOption {
+          type = int;
+        };
+        bar = mkOption {
+          type = str;
+        };
       };
-    };
-  });
-};
+    });
+  };
+}
 ```
 :::
 
 ::: {#ex-submodule-attrsof-definition .example}
 ### Definition of attribute sets of submodules
 ```nix
-config.mod.one = { foo = 1; bar = "one"; };
-config.mod.two = { foo = 2; bar = "two"; };
+{
+  config.mod.one = { foo = 1; bar = "one"; };
+  config.mod.two = { foo = 2; bar = "two"; };
+}
 ```
 :::
 
@@ -495,10 +507,12 @@ Types are mainly characterized by their `check` and `merge` functions.
     ### Adding a type check
 
     ```nix
-    byte = mkOption {
-      description = "An integer between 0 and 255.";
-      type = types.addCheck types.int (x: x >= 0 && x <= 255);
-    };
+    {
+      byte = mkOption {
+        description = "An integer between 0 and 255.";
+        type = types.addCheck types.int (x: x >= 0 && x <= 255);
+      };
+    }
     ```
     :::
 
@@ -506,12 +520,14 @@ Types are mainly characterized by their `check` and `merge` functions.
     ### Overriding a type check
 
     ```nix
-    nixThings = mkOption {
-      description = "words that start with 'nix'";
-      type = types.str // {
-        check = (x: lib.hasPrefix "nix" x)
+    {
+      nixThings = mkOption {
+        description = "words that start with 'nix'";
+        type = types.str // {
+          check = (x: lib.hasPrefix "nix" x);
+        };
       };
-    };
+    }
     ```
     :::
 
diff --git a/nixpkgs/nixos/doc/manual/development/settings-options.section.md b/nixpkgs/nixos/doc/manual/development/settings-options.section.md
index 71ec9bbc8892..806eee563790 100644
--- a/nixpkgs/nixos/doc/manual/development/settings-options.section.md
+++ b/nixpkgs/nixos/doc/manual/development/settings-options.section.md
@@ -248,28 +248,30 @@ up in the manual.
 ::: {#ex-settings-typed-attrs .example}
 ### Declaring a type-checked `settings` attribute
 ```nix
-settings = lib.mkOption {
-  type = lib.types.submodule {
+{
+  settings = lib.mkOption {
+    type = lib.types.submodule {
+
+      freeformType = settingsFormat.type;
+
+      # Declare an option for the port such that the type is checked and this option
+      # is shown in the manual.
+      options.port = lib.mkOption {
+        type = lib.types.port;
+        default = 8080;
+        description = ''
+          Which port this service should listen on.
+        '';
+      };
 
-    freeformType = settingsFormat.type;
-
-    # Declare an option for the port such that the type is checked and this option
-    # is shown in the manual.
-    options.port = lib.mkOption {
-      type = lib.types.port;
-      default = 8080;
-      description = ''
-        Which port this service should listen on.
-      '';
     };
-
+    default = {};
+    description = ''
+      Configuration for Foo, see
+      <link xlink:href="https://example.com/docs/foo"/>
+      for supported values.
+    '';
   };
-  default = {};
-  description = ''
-    Configuration for Foo, see
-    <link xlink:href="https://example.com/docs/foo"/>
-    for supported values.
-  '';
-};
+}
 ```
 :::
diff --git a/nixpkgs/nixos/doc/manual/development/unit-handling.section.md b/nixpkgs/nixos/doc/manual/development/unit-handling.section.md
index d5ba6a9529d0..1f6a30d6ef34 100644
--- a/nixpkgs/nixos/doc/manual/development/unit-handling.section.md
+++ b/nixpkgs/nixos/doc/manual/development/unit-handling.section.md
@@ -94,11 +94,13 @@ To make an existing sysinit service restart correctly during system switch, you
 have to declare:
 
 ```nix
-systemd.services.my-sysinit = {
-  requiredBy = [ "sysinit-reactivation.target" ];
-  before = [ "sysinit-reactivation.target" ];
-  restartTriggers = [ config.environment.etc."my-sysinit.d".source ];
-};
+{
+  systemd.services.my-sysinit = {
+    requiredBy = [ "sysinit-reactivation.target" ];
+    before = [ "sysinit-reactivation.target" ];
+    restartTriggers = [ config.environment.etc."my-sysinit.d".source ];
+  };
+}
 ```
 
 You need to configure appropriate `restartTriggers` specific to your service.
diff --git a/nixpkgs/nixos/doc/manual/development/writing-modules.chapter.md b/nixpkgs/nixos/doc/manual/development/writing-modules.chapter.md
index 20157a21e890..67a5cc23a6aa 100644
--- a/nixpkgs/nixos/doc/manual/development/writing-modules.chapter.md
+++ b/nixpkgs/nixos/doc/manual/development/writing-modules.chapter.md
@@ -28,7 +28,7 @@ NixOS modules:
 ```nix
 { config, pkgs, ... }:
 
-{ option definitions
+{ # option definitions
 }
 ```
 
@@ -43,15 +43,15 @@ is shown in [Example: Structure of NixOS Modules](#ex-module-syntax).
 
 {
   imports =
-    [ paths of other modules
+    [ # paths of other modules
     ];
 
   options = {
-    option declarations
+    # option declarations
   };
 
   config = {
-    option definitions
+    # option definitions
   };
 }
 ```
diff --git a/nixpkgs/nixos/doc/manual/development/writing-nixos-tests.section.md b/nixpkgs/nixos/doc/manual/development/writing-nixos-tests.section.md
index 50886376c240..3ce12f41c60f 100644
--- a/nixpkgs/nixos/doc/manual/development/writing-nixos-tests.section.md
+++ b/nixpkgs/nixos/doc/manual/development/writing-nixos-tests.section.md
@@ -8,10 +8,10 @@ A NixOS test is a module that has the following structure:
   # One or more machines:
   nodes =
     { machine =
-        { config, pkgs, ... }: { … };
+        { config, pkgs, ... }: { /* ... */ };
       machine2 =
-        { config, pkgs, ... }: { … };
-      …
+        { config, pkgs, ... }: { /* ... */ };
+      # …
     };
 
   testScript =
@@ -46,16 +46,20 @@ Tests are invoked differently depending on whether the test is part of NixOS or
 Tests that are part of NixOS are added to [`nixos/tests/all-tests.nix`](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests/all-tests.nix).
 
 ```nix
+{
   hostname = runTest ./hostname.nix;
+}
 ```
 
 Overrides can be added by defining an anonymous module in `all-tests.nix`.
 
 ```nix
+{
   hostname = runTest {
     imports = [ ./hostname.nix ];
     defaults.networking.firewall.enable = false;
   };
+}
 ```
 
 You can run a test with attribute name `hostname` in `nixos/tests/all-tests.nix` by invoking:
@@ -161,7 +165,7 @@ For faster dev cycles it's also possible to disable the code-linters
   skipLint = true;
   nodes.machine =
     { config, pkgs, ... }:
-    { configuration…
+    { # configuration…
     };
 
   testScript =
@@ -177,12 +181,14 @@ linter directly (again, don't commit this within the Nixpkgs
 repository):
 
 ```nix
+{
   testScript =
     ''
       # fmt: off
       Python code…
       # fmt: on
     '';
+}
 ```
 
 Similarly, the type checking of test scripts can be disabled in the following
@@ -193,7 +199,7 @@ way:
   skipTypeCheck = true;
   nodes.machine =
     { config, pkgs, ... }:
-    { configuration…
+    { # configuration…
     };
 }
 ```
diff --git a/nixpkgs/nixos/doc/manual/installation/building-images-via-systemd-repart.chapter.md b/nixpkgs/nixos/doc/manual/installation/building-images-via-systemd-repart.chapter.md
index 10bee156d113..5a552a54f531 100644
--- a/nixpkgs/nixos/doc/manual/installation/building-images-via-systemd-repart.chapter.md
+++ b/nixpkgs/nixos/doc/manual/installation/building-images-via-systemd-repart.chapter.md
@@ -18,11 +18,11 @@ An example of how to build an image:
     partitions = {
       "esp" = {
         contents = {
-          ...
+          # ...
         };
         repartConfig = {
           Type = "esp";
-          ...
+          # ...
         };
       };
       "root" = {
@@ -30,7 +30,7 @@ An example of how to build an image:
         repartConfig = {
           Type = "root";
           Label = "nixos";
-          ...
+          # ...
         };
       };
     };
@@ -47,19 +47,21 @@ determined by the mount point, you have to set `stripNixStorePrefix = true;` so
 that the prefix is stripped from the paths before copying them into the image.
 
 ```nix
-fileSystems."/nix/store".device = "/dev/disk/by-partlabel/nix-store"
-
-image.repart.partitions = {
-  "store" = {
-    storePaths = [ config.system.build.toplevel ];
-    stripNixStorePrefix = true;
-    repartConfig = {
-      Type = "linux-generic";
-      Label = "nix-store";
-      ...
+{
+  fileSystems."/nix/store".device = "/dev/disk/by-partlabel/nix-store";
+
+  image.repart.partitions = {
+    "store" = {
+      storePaths = [ config.system.build.toplevel ];
+      stripNixStorePrefix = true;
+      repartConfig = {
+        Type = "linux-generic";
+        Label = "nix-store";
+        # ...
+      };
     };
   };
-};
+}
 ```
 
 ## Appliance Image {#sec-image-repart-appliance}
diff --git a/nixpkgs/nixos/doc/manual/installation/changing-config.chapter.md b/nixpkgs/nixos/doc/manual/installation/changing-config.chapter.md
index 9e56b15a880f..07a0074d17e7 100644
--- a/nixpkgs/nixos/doc/manual/installation/changing-config.chapter.md
+++ b/nixpkgs/nixos/doc/manual/installation/changing-config.chapter.md
@@ -87,7 +87,9 @@ set `mutableUsers = false`. Another way is to temporarily add the
 following to your configuration:
 
 ```nix
-users.users.your-user.initialHashedPassword = "test";
+{
+  users.users.your-user.initialHashedPassword = "test";
+}
 ```
 
 *Important:* delete the \$hostname.qcow2 file if you have started the
diff --git a/nixpkgs/nixos/doc/manual/installation/installing-behind-a-proxy.section.md b/nixpkgs/nixos/doc/manual/installation/installing-behind-a-proxy.section.md
index aca151531d0f..691f9c9ccf6d 100644
--- a/nixpkgs/nixos/doc/manual/installation/installing-behind-a-proxy.section.md
+++ b/nixpkgs/nixos/doc/manual/installation/installing-behind-a-proxy.section.md
@@ -7,8 +7,10 @@ To install NixOS behind a proxy, do the following before running
     keep the internet accessible after reboot.
 
     ```nix
-    networking.proxy.default = "http://user:password@proxy:port/";
-    networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
+    {
+      networking.proxy.default = "http://user:password@proxy:port/";
+      networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
+    }
     ```
 
 1.  Set up the proxy environment variables in the shell where you are
diff --git a/nixpkgs/nixos/doc/manual/installation/installing-from-other-distro.section.md b/nixpkgs/nixos/doc/manual/installation/installing-from-other-distro.section.md
index 921592fe5357..10ac2be4e161 100644
--- a/nixpkgs/nixos/doc/manual/installation/installing-from-other-distro.section.md
+++ b/nixpkgs/nixos/doc/manual/installation/installing-from-other-distro.section.md
@@ -89,12 +89,14 @@ The first steps to all these are the same:
     want to add something like this to your `configuration.nix`:
 
     ```nix
-    boot.loader.grub.extraEntries = ''
-      menuentry "Ubuntu" {
-        search --set=ubuntu --fs-uuid 3cc3e652-0c1f-4800-8451-033754f68e6e
-        configfile "($ubuntu)/boot/grub/grub.cfg"
-      }
-    '';
+    {
+      boot.loader.grub.extraEntries = ''
+        menuentry "Ubuntu" {
+          search --set=ubuntu --fs-uuid 3cc3e652-0c1f-4800-8451-033754f68e6e
+          configfile "($ubuntu)/boot/grub/grub.cfg"
+        }
+      '';
+    }
     ```
 
     (You can find the appropriate UUID for your partition in
@@ -164,7 +166,9 @@ The first steps to all these are the same:
     `sudo passwd -l root` if you use `sudo`)
 
     ```nix
-    users.users.root.initialHashedPassword = "";
+    {
+      users.users.root.initialHashedPassword = "";
+    }
     ```
 
 1.  Build the NixOS closure and install it in the `system` profile:
diff --git a/nixpkgs/nixos/doc/manual/installation/installing-virtualbox-guest.section.md b/nixpkgs/nixos/doc/manual/installation/installing-virtualbox-guest.section.md
index 004838e586be..4b9ae0a9c55f 100644
--- a/nixpkgs/nixos/doc/manual/installation/installing-virtualbox-guest.section.md
+++ b/nixpkgs/nixos/doc/manual/installation/installing-virtualbox-guest.section.md
@@ -29,14 +29,18 @@ There are a few modifications you should make in configuration.nix.
 Enable booting:
 
 ```nix
-boot.loader.grub.device = "/dev/sda";
+{
+  boot.loader.grub.device = "/dev/sda";
+}
 ```
 
 Also remove the fsck that runs at startup. It will always fail to run,
 stopping your boot until you press `*`.
 
 ```nix
-boot.initrd.checkJournalingFS = false;
+{
+  boot.initrd.checkJournalingFS = false;
+}
 ```
 
 Shared folders can be given a name and a path in the host system in the
diff --git a/nixpkgs/nixos/doc/manual/installation/upgrading.chapter.md b/nixpkgs/nixos/doc/manual/installation/upgrading.chapter.md
index 79cd4e55be5c..09338bf8723d 100644
--- a/nixpkgs/nixos/doc/manual/installation/upgrading.chapter.md
+++ b/nixpkgs/nixos/doc/manual/installation/upgrading.chapter.md
@@ -101,8 +101,10 @@ You can keep a NixOS system up-to-date automatically by adding the
 following to `configuration.nix`:
 
 ```nix
-system.autoUpgrade.enable = true;
-system.autoUpgrade.allowReboot = true;
+{
+  system.autoUpgrade.enable = true;
+  system.autoUpgrade.allowReboot = true;
+}
 ```
 
 This enables a periodically executed systemd service named
@@ -114,5 +116,7 @@ the new generation contains a different kernel, initrd or kernel
 modules. You can also specify a channel explicitly, e.g.
 
 ```nix
-system.autoUpgrade.channel = "https://channels.nixos.org/nixos-23.11";
+{
+  system.autoUpgrade.channel = "https://channels.nixos.org/nixos-23.11";
+}
 ```
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1509.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1509.section.md
index f47d13008185..c2ac89a09518 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-1509.section.md
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1509.section.md
@@ -253,9 +253,9 @@ Installing Haskell _libraries_ this way, however, is no longer supported. See th
 
   {
     options = {
-      foo = mkOption { … };
+      foo = mkOption { /* … */ };
     };
-    config = mkIf config.foo { … };
+    config = mkIf config.foo { /* … */ };
   }
   ```
 
@@ -268,9 +268,9 @@ Installing Haskell _libraries_ this way, however, is no longer supported. See th
 
   {
     options = {
-      foo = mkOption { option declaration };
+      foo = mkOption { /* option declaration */ };
     };
-    config = mkIf config.foo { option definition };
+    config = mkIf config.foo { /* option definition */ };
   }
   ```
 
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1703.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1703.section.md
index b82c41e28ca3..e20d84d306e8 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-1703.section.md
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1703.section.md
@@ -246,7 +246,7 @@ When upgrading from a previous release, please be aware of the following incompa
   let
     pkgs = import <nixpkgs> {};
   in
-    pkgs.overridePackages (self: super: ...)
+    pkgs.overridePackages (self: super: { /* ... */ })
   ```
 
   should be replaced by:
@@ -255,7 +255,7 @@ When upgrading from a previous release, please be aware of the following incompa
   let
     pkgs = import <nixpkgs> {};
   in
-    import pkgs.path { overlays = [(self: super: ...)]; }
+    import pkgs.path { overlays = [(self: super: { /* ... */ })]; }
   ```
 
 - Autoloading connection tracking helpers is now disabled by default. This default was also changed in the Linux kernel and is considered insecure if not configured properly in your firewall. If you need connection tracking helpers (i.e. for active FTP) please enable `networking.firewall.autoLoadConntrackHelpers` and tune `networking.firewall.connectionTrackingModules` to suit your needs.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2009.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2009.section.md
index eac02a8ff445..900c20dbe717 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-2009.section.md
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2009.section.md
@@ -334,22 +334,18 @@ When upgrading from a previous release, please be aware of the following incompa
 - The remaining configuration flags can now be set directly on the `php` attribute. For example, instead of
 
   ```nix
-  {
-    php.override {
-      config.php.embed = true;
-      config.php.apxs2 = false;
-    }
+  php.override {
+    config.php.embed = true;
+    config.php.apxs2 = false;
   }
   ```
 
   you should now write
 
   ```nix
-  {
-    php.override {
-      embedSupport = true;
-      apxs2Support = false;
-    }
+  php.override {
+    embedSupport = true;
+    apxs2Support = false;
   }
   ```
 
@@ -383,9 +379,10 @@ When upgrading from a previous release, please be aware of the following incompa
   {
     specialisation.example-sub-configuration = {
       configuration = {
-        ...
+        # ...
       };
-  };
+    };
+  }
   ```
 
   Replace a `nesting.children` entry with:
@@ -395,9 +392,10 @@ When upgrading from a previous release, please be aware of the following incompa
     specialisation.example-sub-configuration = {
       inheritParentConfig = false;
       configuration = {
-        ...
+        # ...
       };
-  };
+    };
+  }
   ```
 
   To switch to a specialised configuration at runtime you need to run:
@@ -469,7 +467,7 @@ When upgrading from a previous release, please be aware of the following incompa
     services.bitcoind = {
       enable = true;
       extraConfig = "...";
-      ...
+      # ...
     };
   }
   ```
@@ -483,7 +481,7 @@ When upgrading from a previous release, please be aware of the following incompa
       dataDir = "/var/lib/bitcoind";
       user = "bitcoin";
       extraConfig = "...";
-      ...
+      # ...
     };
   }
   ```
@@ -502,7 +500,7 @@ When upgrading from a previous release, please be aware of the following incompa
   {
     services.dokuwiki = {
       enable = true;
-      ...
+      # ...
     };
   }
   ```
@@ -517,7 +515,7 @@ When upgrading from a previous release, please be aware of the following incompa
         forceSSL = true;
         enableACME = true;
       };
-      ...
+      # ...
     };
   }
   ```
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2205.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2205.section.md
index 6f5a807f478a..3a2c70fb7a31 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-2205.section.md
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2205.section.md
@@ -462,6 +462,7 @@ In addition to numerous new and upgraded packages, this release has the followin
 
   Before:
   ```nix
+  {
     services.keycloak = {
       enable = true;
       httpPort = "8080";
@@ -471,10 +472,12 @@ In addition to numerous new and upgraded packages, this release has the followin
         "subsystem=undertow"."server=default-server"."http-listener=default".proxy-address-forwarding = true;
       };
     };
+  }
   ```
 
   After:
   ```nix
+  {
     services.keycloak = {
       enable = true;
       settings = {
@@ -485,6 +488,7 @@ In addition to numerous new and upgraded packages, this release has the followin
       };
       database.passwordFile = "/run/keys/db_password";
     };
+  }
   ```
 
 - The MoinMoin wiki engine (`services.moinmoin`) has been removed, because Python 2 is being retired from nixpkgs.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2211.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2211.section.md
index 1c73d0c9790d..77cb6c9baadb 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-2211.section.md
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2211.section.md
@@ -254,10 +254,12 @@ In addition to numerous new and upgraded packages, this release includes the fol
 
 - `services.github-runner` and `services.github-runners.<name>` gained the option `serviceOverrides` which allows overriding the systemd `serviceConfig`. If you have been overriding the systemd service configuration (i.e., by defining `systemd.services.github-runner.serviceConfig`), you have to use the `serviceOverrides` option now. Example:
 
-  ```
-  services.github-runner.serviceOverrides.SupplementaryGroups = [
-    "docker"
-  ];
+  ```nix
+  {
+    services.github-runner.serviceOverrides.SupplementaryGroups = [
+      "docker"
+    ];
+  }
   ```
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2305.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2305.section.md
index 21c798b3b4a4..ce874a6e0b2d 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-2305.section.md
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2305.section.md
@@ -25,7 +25,9 @@ In addition to numerous new and updated packages, this release has the following
 
 - NixOS now defaults to using [nsncd](https://github.com/twosigma/nsncd), a non-caching reimplementation of nscd in Rust, as its NSS lookup dispatcher. This replaces the buggy and deprecated nscd implementation provided through glibc. When you find problems, you can switch back by disabling it:
   ```nix
-  services.nscd.enableNsncd = false;
+  {
+    services.nscd.enableNsncd = false;
+  }
   ```
 
 - The internal option `boot.bootspec.enable` is now enabled by default because [RFC 0125](https://github.com/NixOS/rfcs/pull/125) was merged. This means you will have a bootspec document called `boot.json` generated for each system and specialisation in the top-level. This is useful to enable advanced boot use cases in NixOS, such as Secure Boot.
@@ -190,11 +192,13 @@ In addition to numerous new and updated packages, this release has the following
 - MAC-then-encrypt algorithms were removed from the default selection of `services.openssh.settings.Macs`. If you still require these [MACs](https://en.wikipedia.org/wiki/Message_authentication_code), for example when you are relying on libssh2 (e.g. VLC) or the SSH library shipped on the iPhone, you can re-add them like this:
 
   ```nix
-  services.openssh.settings.Macs = [
-    "hmac-sha2-512"
-    "hmac-sha2-256"
-    "umac-128@openssh.com"
-  ];
+  {
+    services.openssh.settings.Macs = [
+      "hmac-sha2-512"
+      "hmac-sha2-256"
+      "umac-128@openssh.com"
+    ];
+  }
   ```
 
 - `podman` now uses the `netavark` network stack. Users will need to delete all of their local containers, images, volumes, etc, by running `podman system reset --force` once before upgrading their systems.
@@ -227,21 +231,25 @@ In addition to numerous new and updated packages, this release has the following
 - The attributes used by `services.snapper.configs.<name>` have changed. Migrate from this:
 
   ```nix
-  services.snapper.configs.example = {
-    subvolume = "/example";
-    extraConfig = ''
-      ALLOW_USERS="alice"
-    '';
-  };
+  {
+    services.snapper.configs.example = {
+      subvolume = "/example";
+      extraConfig = ''
+        ALLOW_USERS="alice"
+      '';
+    };
+  }
   ```
 
   to this:
 
   ```nix
-  services.snapper.configs.example = {
-    SUBVOLUME = "/example";
-    ALLOW_USERS = [ "alice" ];
-  };
+  {
+    services.snapper.configs.example = {
+      SUBVOLUME = "/example";
+      ALLOW_USERS = [ "alice" ];
+    };
+  }
   ```
 
 - The default module options for [services.snapserver.openFirewall](#opt-services.snapserver.openFirewall), [services.tmate-ssh-server.openFirewall](#opt-services.tmate-ssh-server.openFirewall) and [services.unifi-video.openFirewall](#opt-services.unifi-video.openFirewall) have been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall.
@@ -446,15 +454,17 @@ In addition to numerous new and updated packages, this release has the following
 - NixOS swap partitions with random encryption can now control the sector size, cipher, and key size used to set up the plain encryption device over the underlying block device rather than allowing them to be determined by `cryptsetup(8)`. One can use these features like so:
 
   ```nix
-  swapDevices = [ {
-    device = "/dev/disk/by-partlabel/swapspace";
-    randomEncryption = {
-      enable = true;
-      cipher = "aes-xts-plain64";
-      keySize = 512;
-      sectorSize = 4096;
-    };
-  } ];
+  {
+    swapDevices = [ {
+      device = "/dev/disk/by-partlabel/swapspace";
+      randomEncryption = {
+        enable = true;
+        cipher = "aes-xts-plain64";
+        keySize = 512;
+        sectorSize = 4096;
+      };
+    } ];
+  }
   ```
 
 - New option `security.pam.zfs` to enable unlocking and mounting of encrypted ZFS home dataset at login.
@@ -465,7 +475,9 @@ In addition to numerous new and updated packages, this release has the following
 
 - PostgreSQL has added opt-in support for [JIT compilation](https://www.postgresql.org/docs/current/jit-reason.html). It can be enabled like this:
   ```nix
-  services.postgresql.enableJIT = true;
+  {
+    services.postgresql.enableJIT = true;
+  }
   ```
 
 - `services.netdata` offers a [`services.netdata.deadlineBeforeStopSec`](#opt-services.netdata.deadlineBeforeStopSec) option which will control the deadline (in seconds) after which systemd will consider your netdata instance as dead if it didn't start in the elapsed time. It is helpful when your netdata instance takes longer to start because of a large amount of state or upgrades.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md
index 1aef1828908f..5313f04cb789 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md
@@ -700,11 +700,13 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
   will probably be removed eventually.
 
   ```nix
-  qt = {
-    enable = true;
-    platformTheme = "gnome";
-    style = "adwaita";
-  };
+  {
+    qt = {
+      enable = true;
+      platformTheme = "gnome";
+      style = "adwaita";
+    };
+  }
   ```
 
 - DocBook option documentation is no longer supported, all module documentation
@@ -885,11 +887,13 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
   to a compatible major version, so they can move at their own pace.
 
   ```nix
-  python = python3.override {
-    packageOverrides = self: super: {
-      django = super.django_3;
+  {
+    python = python3.override {
+      packageOverrides = self: super: {
+        django = super.django_3;
+      };
     };
-  };
+  }
   ```
 
 - The `qemu-vm.nix` module by default now identifies block devices via
@@ -1228,16 +1232,18 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
 - CoreDNS may be built with external plugins now. This may be done by
   overriding `externalPlugins` and `vendorHash` arguments like this:
 
-  ```
-  services.coredns = {
-    enable = true;
-    package = pkgs.coredns.override {
-      externalPlugins = [
-        {name = "fanout"; repo = "github.com/networkservicemesh/fanout"; version = "v1.9.1";}
-      ];
-      vendorHash = "<SRI hash>";
+  ```nix
+  {
+    services.coredns = {
+      enable = true;
+      package = pkgs.coredns.override {
+        externalPlugins = [
+          {name = "fanout"; repo = "github.com/networkservicemesh/fanout"; version = "v1.9.1";}
+        ];
+        vendorHash = "<SRI hash>";
+      };
     };
-  };
+  }
   ```
 
   To get the necessary SRI hash, set `vendorHash = "";`. The build will fail
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2405.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2405.section.md
index 19ff6f4485cd..2909c40fa291 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-2405.section.md
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2405.section.md
@@ -32,6 +32,11 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - Julia environments can now be built with arbitrary packages from the ecosystem using the `.withPackages` function. For example: `julia.withPackages ["Plots"]`.
 
+- The PipeWire and WirePlumber modules have removed support for using
+  `environment.etc."pipewire/..."` and `environment.etc."wireplumber/..."`.
+  Use `services.pipewire.extraConfig` or `services.pipewire.configPackages` for PipeWire and
+  `services.pipewire.wireplumber.configPackages` for WirePlumber instead.
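+  For example, a minimal sketch of the new drop-in style (the drop-in name and property values here are illustrative):
+
+  ```nix
+  {
+    services.pipewire.extraConfig.pipewire."92-low-latency" = {
+      "context.properties" = {
+        "default.clock.rate" = 48000;
+        "default.clock.quantum" = 64;
+      };
+    };
+  }
+  ```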
+
 - A new option `systemd.sysusers.enable` was added. If enabled, users and
   groups are created with systemd-sysusers instead of with a custom perl script.
 
@@ -52,7 +57,7 @@ In addition to numerous new and upgraded packages, this release has the followin
   without perl). Previously, the NixOS activation depended on two perl scripts
   which can now be replaced via an opt-in mechanism. To make your system
   perlless, you can use the new perlless profile:
-  ```
+  ```nix
   { modulesPath, ... }: {
     imports = [ "${modulesPath}/profiles/perlless.nix" ];
   }
@@ -68,6 +73,8 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - [Guix](https://guix.gnu.org), a functional package manager inspired by Nix. Available as [services.guix](#opt-services.guix.enable).
 
+- [PhotonVision](https://photonvision.org/), a free, fast, and easy-to-use computer vision solution for the FIRST® Robotics Competition.
+
 - [pyLoad](https://pyload.net/), a FOSS download manager written in Python. Available as [services.pyload](#opt-services.pyload.enable)
 
 - [maubot](https://github.com/maubot/maubot), a plugin-based Matrix bot framework. Available as [services.maubot](#opt-services.maubot.enable).
@@ -78,6 +85,8 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - [pretalx](https://github.com/pretalx/pretalx), a conference planning tool. Available as [services.pretalx](#opt-services.pretalx.enable).
 
+- [dnsproxy](https://github.com/AdguardTeam/dnsproxy), a simple DNS proxy with DoH, DoT, DoQ and DNSCrypt support. Available as [services.dnsproxy](#opt-services.dnsproxy.enable).
+
 - [rspamd-trainer](https://gitlab.com/onlime/rspamd-trainer), script triggered by a helper which reads mails from a specific mail inbox and feeds them into rspamd for spam/ham training.
 
 - [ollama](https://ollama.ai), server for running large language models locally.
@@ -95,6 +104,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - [transfer-sh](https://github.com/dutchcoders/transfer.sh), a tool that supports easy and fast file sharing from the command-line. Available as [services.transfer-sh](#opt-services.transfer-sh.enable).
 
+- [MollySocket](https://github.com/mollyim/mollysocket) which allows getting Signal notifications via UnifiedPush.
+
 - [Suwayomi Server](https://github.com/Suwayomi/Suwayomi-Server), a free and open source manga reader server that runs extensions built for [Tachiyomi](https://tachiyomi.org). Available as [services.suwayomi-server](#opt-services.suwayomi-server.enable).
 
 - [ping_exporter](https://github.com/czerwonk/ping_exporter), a Prometheus exporter for ICMP echo requests. Available as [services.prometheus.exporters.ping](#opt-services.prometheus.exporters.ping.enable).
@@ -107,6 +118,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - [Pretix](https://pretix.eu/about/en/), an open source ticketing software for events. Available as [services.pretix](#opt-services.pretix.enable).
 
+- [microsocks](https://github.com/rofl0r/microsocks), a tiny, portable SOCKS5 server with very moderate resource usage. Available as [services.microsocks](#opt-services.microsocks.enable).
+
 - [Clevis](https://github.com/latchset/clevis), a pluggable framework for automated decryption, used to unlock encrypted devices in initrd. Available as [boot.initrd.clevis.enable](#opt-boot.initrd.clevis.enable).
 
 - [fritz-exporter](https://github.com/pdreker/fritz_exporter), a Prometheus exporter for extracting metrics from [FRITZ!](https://avm.de/produkte/) devices. Available as [services.prometheus.exporters.fritz](#opt-services.prometheus.exporters.fritz.enable).
@@ -135,6 +148,9 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - The `power.ups` module now generates `upsd.conf`, `upsd.users` and `upsmon.conf` automatically from a set of new configuration options. This breaks compatibility with existing `power.ups` setups where these files were created manually. Back up these files before upgrading NixOS.
 
+- `programs.nix-ld.libraries` no longer sets `baseLibraries` via the option's default but via `config`, and now merges any additional libraries with the default ones.
+  This means that `lib.mkForce` must be used to clear the list of default libraries.
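+  A minimal sketch of clearing the defaults (assuming `lib` is in scope, as in an ordinary module):
+
+  ```nix
+  {
+    programs.nix-ld.libraries = lib.mkForce [ ];
+  }
+  ```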
+
 - `pdns` was updated to version [v4.9.x](https://doc.powerdns.com/authoritative/changelog/4.9.html), which introduces breaking changes. Check out the [Upgrade Notes](https://doc.powerdns.com/authoritative/upgrading.html#to-4-9-0) for details.
 
 - `unrar` was updated to v7. See [changelog](https://www.rarlab.com/unrar7notes.htm) for more information.
@@ -175,6 +191,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
   "mysecret"` becomes `services.aria2.rpcSecretFile = "/path/to/secret_file"`
   where the file `secret_file` contains the string `mysecret`.
 
+- `buildGoModule` now throws an error when `vendorHash` is not specified. `vendorSha256`, deprecated in Nixpkgs 23.11, is now ignored and is no longer a `vendorHash` alias.
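+  As an illustration, a minimal package sketch that sets `vendorHash` explicitly (`pname`, `version` and `src` are placeholders):
+
+  ```nix
+  { lib, buildGoModule }:
+
+  buildGoModule {
+    pname = "example";
+    version = "0.1.0";
+    src = ./.;
+    # Must now be set explicitly; use `lib.fakeHash` once to learn the real hash,
+    # or `null` when the project vendors its dependencies.
+    vendorHash = lib.fakeHash;
+  }
+  ```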
+
 - Invidious has changed its default database username from `kemal` to `invidious`. Setups involving an externally provisioned database (i.e. `services.invidious.database.createLocally == false`) should adjust their configuration accordingly. The old `kemal` user will not be removed automatically even when the database is provisioned automatically.(https://github.com/NixOS/nixpkgs/pull/265857)
 
 - `writeReferencesToFile` is deprecated in favour of the new trivial build helper `writeClosure`. The latter accepts a list of paths and has an unambiguous name and cleaner implementation.
@@ -209,13 +227,14 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
   Example:
 
   ```nix
+  {
     locations."/".extraConfig = ''
       add_header Alt-Svc 'h3=":$server_port"; ma=86400';
     '';
     locations."^~ /assets/".extraConfig = ''
       add_header Alt-Svc 'h3=":$server_port"; ma=86400';
     '';
-
+  }
   ```
 
 - The package `optparse-bash` is now dropped due to upstream inactivity. Alternatives available in Nixpkgs include [`argc`](https://github.com/sigoden/argc), [`argbash`](https://github.com/matejak/argbash), [`bashly`](https://github.com/DannyBen/bashly) and [`gum`](https://github.com/charmbracelet/gum), to name a few.
@@ -326,6 +345,9 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
   - The `erlang_node_short_name`, `erlang_node_name`, `port` and `options` configuration parameters are gone, and have been replaced with an `environment` parameter.
     Use the appropriate [environment variables](https://hexdocs.pm/livebook/readme.html#environment-variables) inside `environment` to configure the service instead.
 
+- The `crystal` package has been updated to 1.11.x, which has some breaking changes.
+  Refer to Crystal's changelog for more information. ([v1.10](https://github.com/crystal-lang/crystal/blob/master/CHANGELOG.md#1100-2023-10-09), [v1.11](https://github.com/crystal-lang/crystal/blob/master/CHANGELOG.md#1110-2024-01-08))
+
 ## Other Notable Changes {#sec-release-24.05-notable-changes}
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@@ -351,6 +373,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 - The Matrix homeserver [Synapse](https://element-hq.github.io/synapse/) module now supports configuring UNIX domain socket [listeners](#opt-services.matrix-synapse.settings.listeners) through the `path` option.
   The default replication worker on the main instance has been migrated away from TCP sockets to UNIX domain sockets.
 
+- The initrd SSH daemon module gained a new option, `boot.initrd.network.ssh.authorizedKeyFiles`, which adds authorized keys from a list of files.
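+  A minimal sketch (the key file path is a placeholder):
+
+  ```nix
+  {
+    boot.initrd.network.ssh.authorizedKeyFiles = [ ./initrd-authorized-key.pub ];
+  }
+  ```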
+
 - Programs written in [Nim](https://nim-lang.org/) are built with libraries selected by lockfiles.
   The `nimPackages` and `nim2Packages` sets have been removed.
   See https://nixos.org/manual/nixpkgs/unstable#nim for more information.
@@ -432,6 +456,10 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - `nextcloud-setup.service` no longer changes the group of each file & directory inside `/var/lib/nextcloud/{config,data,store-apps}` if one of these directories has the wrong owner group. This was part of transitioning the group used for `/var/lib/nextcloud`, but isn't necessary anymore.
 
+- `services.kavita` now uses the freeform option `services.kavita.settings` for the application settings file.
+  The options `services.kavita.ipAdresses` and `services.kavita.port` now exist at `services.kavita.settings.IpAddresses`
+  and `services.kavita.settings.Port`.
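+  A minimal sketch of the new freeform settings (the values shown are placeholders):
+
+  ```nix
+  {
+    services.kavita.settings = {
+      IpAddresses = "0.0.0.0,::";
+      Port = 5000;
+    };
+  }
+  ```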
+
 - The `krb5` module has been rewritten and moved to `security.krb5`, moving all options but `security.krb5.enable` and `security.krb5.package` into `security.krb5.settings`.
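   A minimal sketch of the new layout (realm and host names are placeholders):
 
   ```nix
   {
     security.krb5 = {
       enable = true;
       settings = {
         libdefaults.default_realm = "EXAMPLE.COM";
         realms."EXAMPLE.COM" = {
           kdc = [ "kdc.example.com" ];
           admin_server = "kdc.example.com";
         };
       };
     };
   }
   ```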
 
 - Gitea 1.21 upgrade has several breaking changes, including:
@@ -446,6 +474,12 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - The module `services.github-runner` has been removed. To configure a single GitHub Actions Runner refer to `services.github-runners.*`. Note that this will trigger a new runner registration.
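   A minimal sketch of a single runner under the new namespace (the runner name, URL and token file are placeholders):
 
   ```nix
   {
     services.github-runners.my-runner = {
       enable = true;
       url = "https://github.com/owner/repo";
       tokenFile = "/run/secrets/github-runner-token";
     };
   }
   ```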
 
+- The `services.slskd` module has been refactored to include more configuration options in
+  the freeform `services.slskd.settings` option, and some defaults (including listen ports)
+  have been changed to match the upstream defaults. Additionally, disk logging is now
+  disabled by default, and the log rotation timer has been removed.
+  The nginx virtualhost option is now of the `vhost-options` type.
+
 - The `btrbk` module now automatically selects and provides the required compression
   program depending on the configured `stream_compress` option. Since this
   replaces the need for the `extraPackages` option, this option will be
diff --git a/nixpkgs/nixos/lib/make-options-doc/default.nix b/nixpkgs/nixos/lib/make-options-doc/default.nix
index 09a4022845e0..4ae9d018e96f 100644
--- a/nixpkgs/nixos/lib/make-options-doc/default.nix
+++ b/nixpkgs/nixos/lib/make-options-doc/default.nix
@@ -232,19 +232,5 @@ in rec {
       echo "file json-br $dst/options.json.br" >> $out/nix-support/hydra-build-products
     '';
 
-  optionsDocBook = lib.warn "optionsDocBook is deprecated since 23.11 and will be removed in 24.05"
-    (pkgs.runCommand "options-docbook.xml" {
-      nativeBuildInputs = [
-        pkgs.nixos-render-docs
-      ];
-    } ''
-      nixos-render-docs -j $NIX_BUILD_CORES options docbook \
-        --manpage-urls ${pkgs.path + "/doc/manpage-urls.json"} \
-        --revision ${lib.escapeShellArg revision} \
-        --document-type ${lib.escapeShellArg documentType} \
-        --varlist-id ${lib.escapeShellArg variablelistId} \
-        --id-prefix ${lib.escapeShellArg optionIdPrefix} \
-        ${optionsJSON}/share/doc/nixos/options.json \
-        "$out"
-    '');
+  optionsDocBook = throw "optionsDocBook has been removed in 24.05";
 }
diff --git a/nixpkgs/nixos/lib/test-driver/test_driver/machine.py b/nixpkgs/nixos/lib/test-driver/test_driver/machine.py
index df8628bce956..c117aab7c401 100644
--- a/nixpkgs/nixos/lib/test-driver/test_driver/machine.py
+++ b/nixpkgs/nixos/lib/test-driver/test_driver/machine.py
@@ -1250,6 +1250,5 @@ class Machine:
             check_return=False,
             check_output=False,
         )
-        self.wait_for_console_text(r"systemd\[1\]:.*Switching root\.")
         self.connected = False
         self.connect()
diff --git a/nixpkgs/nixos/modules/config/users-groups.nix b/nixpkgs/nixos/modules/config/users-groups.nix
index 02cd1a17f538..f9750b7263ca 100644
--- a/nixpkgs/nixos/modules/config/users-groups.nix
+++ b/nixpkgs/nixos/modules/config/users-groups.nix
@@ -496,6 +496,7 @@ let
     in
       filter types.shellPackage.check shells;
 
+  lingeringUsers = map (u: u.name) (attrValues (flip filterAttrs cfg.users (n: u: u.linger)));
 in {
   imports = [
     (mkAliasOptionModuleMD [ "users" "extraUsers" ] [ "users" "users" ])
@@ -695,24 +696,31 @@ in {
       '';
     } else ""; # keep around for backwards compatibility
 
-    system.activationScripts.update-lingering = let
-      lingerDir = "/var/lib/systemd/linger";
-      lingeringUsers = map (u: u.name) (attrValues (flip filterAttrs cfg.users (n: u: u.linger)));
-      lingeringUsersFile = builtins.toFile "lingering-users"
-        (concatStrings (map (s: "${s}\n")
-          (sort (a: b: a < b) lingeringUsers)));  # this sorting is important for `comm` to work correctly
-    in stringAfter [ "users" ] ''
-      if [ -e ${lingerDir} ] ; then
+    systemd.services.linger-users = lib.mkIf ((builtins.length lingeringUsers) > 0) {
+      wantedBy = ["multi-user.target"];
+      after = ["systemd-logind.service"];
+      requires = ["systemd-logind.service"];
+
+      script = let
+        lingerDir = "/var/lib/systemd/linger";
+        lingeringUsersFile = builtins.toFile "lingering-users"
+          (concatStrings (map (s: "${s}\n")
+            (sort (a: b: a < b) lingeringUsers)));  # this sorting is important for `comm` to work correctly
+      in ''
+        mkdir -vp ${lingerDir}
         cd ${lingerDir}
-        for user in ${lingerDir}/*; do
-          if ! id "$user" >/dev/null 2>&1; then
+        for user in $(ls); do
+          if ! id "$user" >/dev/null; then
+            echo "Removing linger for missing user $user"
             rm --force -- "$user"
           fi
         done
-        ls ${lingerDir} | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
-        ls ${lingerDir} | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl  enable-linger
-      fi
-    '';
+        ls | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
+        ls | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl  enable-linger
+      '';
+
+      serviceConfig.Type = "oneshot";
+    };
 
     # Warn about user accounts with deprecated password hashing schemes
     # This does not work when the users and groups are created by
diff --git a/nixpkgs/nixos/modules/hardware/video/nvidia.nix b/nixpkgs/nixos/modules/hardware/video/nvidia.nix
index 3b983f768f91..352c8d8ead54 100644
--- a/nixpkgs/nixos/modules/hardware/video/nvidia.nix
+++ b/nixpkgs/nixos/modules/hardware/video/nvidia.nix
@@ -396,6 +396,9 @@ in {
             modules = [nvidia_x11.bin];
             display = !offloadCfg.enable;
             deviceSection =
+              ''
+                Option "SidebandSocketPath" "/run/nvidia-xdriver/"
+              '' +
               lib.optionalString primeEnabled
               ''
                 BusID "${pCfg.nvidiaBusId}"
@@ -533,8 +536,14 @@ in {
 
         hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
 
-        systemd.tmpfiles.rules =
-          lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+        systemd.tmpfiles.rules = [
+          # Remove the following log message:
+          #    (WW) NVIDIA: Failed to bind sideband socket to
+          #    (WW) NVIDIA:     '/var/run/nvidia-xdriver-b4f69129' Permission denied
+          #
+          # https://bbs.archlinux.org/viewtopic.php?pid=1909115#p1909115
+          "d /run/nvidia-xdriver 0770 root users"
+        ] ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
           "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
 
         boot = {
diff --git a/nixpkgs/nixos/modules/i18n/input-method/default.md b/nixpkgs/nixos/modules/i18n/input-method/default.md
index 42cb8a8d7b6a..6d12462b788e 100644
--- a/nixpkgs/nixos/modules/i18n/input-method/default.md
+++ b/nixpkgs/nixos/modules/i18n/input-method/default.md
@@ -22,11 +22,13 @@ friendly input method user interface.
 
 The following snippet can be used to configure IBus:
 
-```
-i18n.inputMethod = {
-  enabled = "ibus";
-  ibus.engines = with pkgs.ibus-engines; [ anthy hangul mozc ];
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "ibus";
+    ibus.engines = with pkgs.ibus-engines; [ anthy hangul mozc ];
+  };
+}
 ```
 
 `i18n.inputMethod.ibus.engines` is optional and can be used
@@ -48,8 +50,10 @@ Available extra IBus engines are:
     methods, it must appear in the list of engines along with
     `table`. For example:
 
-    ```
-    ibus.engines = with pkgs.ibus-engines; [ table table-others ];
+    ```nix
+    {
+      ibus.engines = with pkgs.ibus-engines; [ table table-others ];
+    }
     ```
 
 To use any input method, the package must be added in the configuration, as
@@ -74,11 +78,13 @@ built-in Input Method Engine, Pinyin, QuWei and Table-based input methods.
 
 The following snippet can be used to configure Fcitx:
 
-```
-i18n.inputMethod = {
-  enabled = "fcitx5";
-  fcitx5.addons = with pkgs; [ fcitx5-mozc fcitx5-hangul fcitx5-m17n ];
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "fcitx5";
+    fcitx5.addons = with pkgs; [ fcitx5-mozc fcitx5-hangul fcitx5-m17n ];
+  };
+}
 ```
 
 `i18n.inputMethod.fcitx5.addons` is optional and can be
@@ -110,10 +116,12 @@ phonetic Korean characters (hangul) and pictographic Korean characters
 
 The following snippet can be used to configure Nabi:
 
-```
-i18n.inputMethod = {
-  enabled = "nabi";
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "nabi";
+  };
+}
 ```
 
 ## Uim {#module-services-input-methods-uim}
@@ -123,10 +131,12 @@ framework. Applications can use it through so-called bridges.
 
 The following snippet can be used to configure uim:
 
-```
-i18n.inputMethod = {
-  enabled = "uim";
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "uim";
+  };
+}
 ```
 
 Note: The [](#opt-i18n.inputMethod.uim.toolbar) option can be
@@ -141,10 +151,12 @@ etc...
 
 The following snippet can be used to configure Hime:
 
-```
-i18n.inputMethod = {
-  enabled = "hime";
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "hime";
+  };
+}
 ```
 
 ## Kime {#module-services-input-methods-kime}
@@ -153,8 +165,10 @@ Kime is Korean IME. it's built with Rust language and let you get simple, safe,
 
 The following snippet can be used to configure Kime:
 
-```
-i18n.inputMethod = {
-  enabled = "kime";
-};
+```nix
+{
+  i18n.inputMethod = {
+    enabled = "kime";
+  };
+}
 ```
diff --git a/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl b/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl
index 2f9edba4f0c9..ef25b8b296e6 100644
--- a/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl
+++ b/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl
@@ -257,7 +257,7 @@ foreach my $path (glob "/sys/class/{block,mmc_host}/*") {
 
 # Add bcache module, if needed.
 my @bcacheDevices = glob("/dev/bcache*");
-@bcacheDevices = grep(!qr#dev/bcachefs.*#, @bcacheDevices);
+@bcacheDevices = grep(!m#dev/bcachefs.*#, @bcacheDevices);
 if (scalar @bcacheDevices > 0) {
     push @initrdAvailableKernelModules, "bcache";
 }
diff --git a/nixpkgs/nixos/modules/module-list.nix b/nixpkgs/nixos/modules/module-list.nix
index a103869c16aa..2ee646291e3b 100644
--- a/nixpkgs/nixos/modules/module-list.nix
+++ b/nixpkgs/nixos/modules/module-list.nix
@@ -193,6 +193,7 @@
   ./programs/gnome-disks.nix
   ./programs/gnome-terminal.nix
   ./programs/gnupg.nix
+  ./programs/goldwarden.nix
   ./programs/gpaste.nix
   ./programs/gphoto2.nix
   ./programs/haguichi.nix
@@ -728,6 +729,7 @@
   ./services/misc/mbpfan.nix
   ./services/misc/mediatomb.nix
   ./services/misc/metabase.nix
+  ./services/misc/mollysocket.nix
   ./services/misc/moonraker.nix
   ./services/misc/mqtt2influxdb.nix
   ./services/misc/n8n.nix
@@ -945,6 +947,7 @@
   ./services/networking/dnscrypt-wrapper.nix
   ./services/networking/dnsdist.nix
   ./services/networking/dnsmasq.nix
+  ./services/networking/dnsproxy.nix
   ./services/networking/doh-proxy-rust.nix
   ./services/networking/ejabberd.nix
   ./services/networking/envoy.nix
@@ -1020,6 +1023,7 @@
   ./services/networking/lxd-image-server.nix
   ./services/networking/magic-wormhole-mailbox-server.nix
   ./services/networking/matterbridge.nix
+  ./services/networking/microsocks.nix
   ./services/networking/mihomo.nix
   ./services/networking/minidlna.nix
   ./services/networking/miniupnpd.nix
@@ -1106,6 +1110,11 @@
   ./services/networking/rpcbind.nix
   ./services/networking/rxe.nix
   ./services/networking/sabnzbd.nix
+  ./services/networking/scion/scion.nix
+  ./services/networking/scion/scion-control.nix
+  ./services/networking/scion/scion-daemon.nix
+  ./services/networking/scion/scion-dispatcher.nix
+  ./services/networking/scion/scion-router.nix
   ./services/networking/seafile.nix
   ./services/networking/searx.nix
   ./services/networking/shadowsocks.nix
@@ -1274,6 +1283,7 @@
   ./services/video/go2rtc/default.nix
   ./services/video/frigate.nix
   ./services/video/mirakurun.nix
+  ./services/video/photonvision.nix
   ./services/video/replay-sorcery.nix
   ./services/video/mediamtx.nix
   ./services/video/unifi-video.nix
diff --git a/nixpkgs/nixos/modules/profiles/all-hardware.nix b/nixpkgs/nixos/modules/profiles/all-hardware.nix
index 4857ea4dbeae..249b767593f2 100644
--- a/nixpkgs/nixos/modules/profiles/all-hardware.nix
+++ b/nixpkgs/nixos/modules/profiles/all-hardware.nix
@@ -58,15 +58,7 @@ in
       # Hyper-V support.
       "hv_storvsc"
     ] ++ lib.optionals pkgs.stdenv.hostPlatform.isAarch [
-      # Most of the following falls into two categories:
-      #  - early KMS / early display
-      #  - early storage (e.g. USB) support
-
-      # Allows using framebuffer configured by the initial boot firmware
-      "simplefb"
-
       # Allwinner support
-
       # Required for early KMS
       "sun4i-drm"
       "sun8i-mixer" # Audio, but required for kms
@@ -75,7 +67,6 @@ in
       "pwm-sun4i"
 
       # Broadcom
-
       "vc4"
     ] ++ lib.optionals pkgs.stdenv.isAarch64 [
       # Most of the following falls into two categories:
diff --git a/nixpkgs/nixos/modules/profiles/macos-builder.nix b/nixpkgs/nixos/modules/profiles/macos-builder.nix
index 6c2602881d6b..786e26cf98f7 100644
--- a/nixpkgs/nixos/modules/profiles/macos-builder.nix
+++ b/nixpkgs/nixos/modules/profiles/macos-builder.nix
@@ -145,6 +145,8 @@ in
         # This installCredentials script is written so that it's as easy as
         # possible for a user to audit before confirming the `sudo`
         installCredentials = hostPkgs.writeShellScript "install-credentials" ''
+          set -euo pipefail
+
           KEYS="''${1}"
           INSTALL=${hostPkgs.coreutils}/bin/install
           "''${INSTALL}" -g nixbld -m 600 "''${KEYS}/${user}_${keyType}" ${privateKey}
@@ -154,6 +156,9 @@ in
         hostPkgs = config.virtualisation.host.pkgs;
 
         script = hostPkgs.writeShellScriptBin "create-builder" (
+          ''
+            set -euo pipefail
+          '' +
           # When running as non-interactively as part of a DarwinConfiguration the working directory
           # must be set to a writeable directory.
         (if cfg.workingDirectory != "." then ''
diff --git a/nixpkgs/nixos/modules/programs/digitalbitbox/default.md b/nixpkgs/nixos/modules/programs/digitalbitbox/default.md
index 9bca14e97ffe..5147bb971e3a 100644
--- a/nixpkgs/nixos/modules/programs/digitalbitbox/default.md
+++ b/nixpkgs/nixos/modules/programs/digitalbitbox/default.md
@@ -4,8 +4,10 @@ Digital Bitbox is a hardware wallet and second-factor authenticator.
 
 The `digitalbitbox` programs module may be installed by setting
 `programs.digitalbitbox` to `true` in a manner similar to
-```
-programs.digitalbitbox.enable = true;
+```nix
+{
+  programs.digitalbitbox.enable = true;
+}
 ```
 and bundles the `digitalbitbox` package (see [](#sec-digitalbitbox-package)),
 which contains the `dbb-app` and `dbb-cli` binaries, along with the hardware
@@ -21,27 +23,33 @@ For more information, see <https://digitalbitbox.com/start_linux>.
 
 The binaries, `dbb-app` (a GUI tool) and `dbb-cli` (a CLI tool), are available
 through the `digitalbitbox` package which could be installed as follows:
-```
-environment.systemPackages = [
-  pkgs.digitalbitbox
-];
+```nix
+{
+  environment.systemPackages = [
+    pkgs.digitalbitbox
+  ];
+}
 ```
 
 ## Hardware {#sec-digitalbitbox-hardware-module}
 
 The digitalbitbox hardware package enables the udev rules for Digital Bitbox
 devices and may be installed as follows:
-```
-hardware.digitalbitbox.enable = true;
+```nix
+{
+  hardware.digitalbitbox.enable = true;
+}
 ```
 
 In order to alter the udev rules, one may provide different values for the
 `udevRule51` and `udevRule52` attributes by means of overriding as follows:
-```
-programs.digitalbitbox = {
-  enable = true;
-  package = pkgs.digitalbitbox.override {
-    udevRule51 = "something else";
+```nix
+{
+  programs.digitalbitbox = {
+    enable = true;
+    package = pkgs.digitalbitbox.override {
+      udevRule51 = "something else";
+    };
   };
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/programs/goldwarden.nix b/nixpkgs/nixos/modules/programs/goldwarden.nix
new file mode 100644
index 000000000000..26f9a87c1986
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/goldwarden.nix
@@ -0,0 +1,50 @@
+{ lib, config, pkgs, ... }:
+let
+  cfg = config.programs.goldwarden;
+in
+{
+  options.programs.goldwarden = {
+    enable = lib.mkEnableOption "Goldwarden";
+    package = lib.mkPackageOption pkgs "goldwarden" {};
+    useSshAgent = lib.mkEnableOption "Goldwarden's SSH Agent" // { default = true; };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [{
+       assertion = cfg.useSshAgent -> !config.programs.ssh.startAgent;
+       message = "Only one ssh-agent can be used at a time.";
+    }];
+
+    environment = {
+      etc = lib.mkIf config.programs.chromium.enable {
+        "chromium/native-messaging-hosts/com.8bit.bitwarden.json".source = "${cfg.package}/etc/chromium/native-messaging-hosts/com.8bit.bitwarden.json";
+        "opt/chrome/native-messaging-hosts/com.8bit.bitwarden.json".source = "${cfg.package}/etc/chrome/native-messaging-hosts/com.8bit.bitwarden.json";
+      };
+
+      extraInit = lib.mkIf cfg.useSshAgent ''
+        if [ -z "$SSH_AUTH_SOCK" -a -n "$HOME" ]; then
+          export SSH_AUTH_SOCK="$HOME/.goldwarden-ssh-agent.sock"
+        fi
+      '';
+
+      systemPackages = [
+        # for cli and polkit action
+        cfg.package
+        # binary exec's into pinentry which should match the DE
+        config.programs.gnupg.agent.pinentryPackage
+      ];
+    };
+
+    programs.firefox.nativeMessagingHosts.packages = [ cfg.package ];
+
+    # see https://github.com/quexten/goldwarden/blob/main/cmd/goldwarden.service
+    systemd.user.services.goldwarden = {
+      description = "Goldwarden daemon";
+      wantedBy = [ "graphical-session.target" ];
+      after = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = "${lib.getExe cfg.package} daemonize";
+      path = [ config.programs.gnupg.agent.pinentryPackage ];
+      unitConfig.ConditionUser = "!@system";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/nix-ld.nix b/nixpkgs/nixos/modules/programs/nix-ld.nix
index 6f36ce33640c..b095437733cc 100644
--- a/nixpkgs/nixos/modules/programs/nix-ld.nix
+++ b/nixpkgs/nixos/modules/programs/nix-ld.nix
@@ -3,7 +3,7 @@ let
   cfg = config.programs.nix-ld;
 
   nix-ld-libraries = pkgs.buildEnv {
-    name = "lb-library-path";
+    name = "ld-library-path";
     pathsToLink = [ "/lib" ];
     paths = map lib.getLib cfg.libraries;
     # TODO make glibc here configurable?
@@ -13,25 +13,6 @@ let
     extraPrefix = "/share/nix-ld";
     ignoreCollisions = true;
   };
-
-  # We currently take all libraries from systemd and nix as the default.
-  # Is there a better list?
-  baseLibraries = with pkgs; [
-    zlib
-    zstd
-    stdenv.cc.cc
-    curl
-    openssl
-    attr
-    libssh
-    bzip2
-    libxml2
-    acl
-    libsodium
-    util-linux
-    xz
-    systemd
-  ];
 in
 {
   meta.maintainers = [ lib.maintainers.mic92 ];
@@ -41,7 +22,7 @@ in
     libraries = lib.mkOption {
       type = lib.types.listOf lib.types.package;
       description = lib.mdDoc "Libraries that automatically become available to all programs. The default set includes common libraries.";
-      default = baseLibraries;
+      default = [ ];
       defaultText = lib.literalExpression "baseLibraries derived from systemd and nix dependencies.";
     };
   };
@@ -57,5 +38,24 @@ in
       NIX_LD = "/run/current-system/sw/share/nix-ld/lib/ld.so";
       NIX_LD_LIBRARY_PATH = "/run/current-system/sw/share/nix-ld/lib";
     };
+
+    # We currently take all libraries from systemd and nix as the default.
+    # Is there a better list?
+    programs.nix-ld.libraries = with pkgs; [
+      zlib
+      zstd
+      stdenv.cc.cc
+      curl
+      openssl
+      attr
+      libssh
+      bzip2
+      libxml2
+      acl
+      libsodium
+      util-linux
+      xz
+      systemd
+    ];
   };
 }
diff --git a/nixpkgs/nixos/modules/programs/plotinus.md b/nixpkgs/nixos/modules/programs/plotinus.md
index fac3bbad1e08..0a2c688c722c 100644
--- a/nixpkgs/nixos/modules/programs/plotinus.md
+++ b/nixpkgs/nixos/modules/programs/plotinus.md
@@ -12,6 +12,8 @@ palette provides a searchable list of of all menu items in the application.
 
 To enable Plotinus, add the following to your
 {file}`configuration.nix`:
-```
-programs.plotinus.enable = true;
+```nix
+{
+  programs.plotinus.enable = true;
+}
 ```
diff --git a/nixpkgs/nixos/modules/programs/steam.nix b/nixpkgs/nixos/modules/programs/steam.nix
index c93a34f61849..bab9bf8107b6 100644
--- a/nixpkgs/nixos/modules/programs/steam.nix
+++ b/nixpkgs/nixos/modules/programs/steam.nix
@@ -45,6 +45,8 @@ in {
       apply = steam: steam.override (prev: {
         extraEnv = (lib.optionalAttrs (cfg.extraCompatPackages != [ ]) {
           STEAM_EXTRA_COMPAT_TOOLS_PATHS = makeSearchPathOutput "steamcompattool" "" cfg.extraCompatPackages;
+        }) // (optionalAttrs cfg.extest.enable {
+          LD_PRELOAD = "${pkgs.pkgsi686Linux.extest}/lib/libextest.so";
         }) // (prev.extraEnv or {});
         extraLibraries = pkgs: let
           prevLibs = if prev ? extraLibraries then prev.extraLibraries pkgs else [ ];
@@ -59,8 +61,6 @@ in {
           # use the setuid wrapped bubblewrap
           bubblewrap = "${config.security.wrapperDir}/..";
         };
-      } // optionalAttrs cfg.extest.enable {
-        extraEnv.LD_PRELOAD = "${pkgs.pkgsi686Linux.extest}/lib/libextest.so";
       });
       description = lib.mdDoc ''
         The Steam package to use. Additional libraries are added from the system
diff --git a/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md b/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md
index 6a310006edbf..7e4a41641eea 100644
--- a/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md
+++ b/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md
@@ -9,7 +9,7 @@ prompt themes.
 The module uses the `oh-my-zsh` package with all available
 features. The initial setup using Nix expressions is fairly similar to the
 configuration format of `oh-my-zsh`.
-```
+```nix
 {
   programs.zsh.ohMyZsh = {
     enable = true;
@@ -33,7 +33,7 @@ environment variable for this which points to a directory with additional
 scripts.
 
 The module can do this as well:
-```
+```nix
 {
   programs.zsh.ohMyZsh.custom = "~/path/to/custom/scripts";
 }
@@ -48,7 +48,7 @@ which bundles completion scripts and a plugin for `oh-my-zsh`.
 
 Rather than using a single mutable path for `ZSH_CUSTOM`,
 it's also possible to generate this path from a list of Nix packages:
-```
+```nix
 { pkgs, ... }:
 {
   programs.zsh.ohMyZsh.customPkgs = [
@@ -89,7 +89,7 @@ If third-party customizations (e.g. new themes) are supposed to be added to
     [upstream repo.](https://github.com/robbyrussell/oh-my-zsh/tree/91b771914bc7c43dd7c7a43b586c5de2c225ceb7/plugins)
 
 A derivation for `oh-my-zsh` may look like this:
-```
+```nix
 { stdenv, fetchFromGitHub }:
 
 stdenv.mkDerivation rec {
diff --git a/nixpkgs/nixos/modules/security/acme/default.md b/nixpkgs/nixos/modules/security/acme/default.md
index 38fbfbf0caec..a6ef2a3fdf18 100644
--- a/nixpkgs/nixos/modules/security/acme/default.md
+++ b/nixpkgs/nixos/modules/security/acme/default.md
@@ -46,33 +46,35 @@ certs are overwritten when the ACME certs arrive. For
 `foo.example.com` the config would look like this:
 
 ```nix
-security.acme.acceptTerms = true;
-security.acme.defaults.email = "admin+acme@example.com";
-services.nginx = {
-  enable = true;
-  virtualHosts = {
-    "foo.example.com" = {
-      forceSSL = true;
-      enableACME = true;
-      # All serverAliases will be added as extra domain names on the certificate.
-      serverAliases = [ "bar.example.com" ];
-      locations."/" = {
-        root = "/var/www";
+{
+  security.acme.acceptTerms = true;
+  security.acme.defaults.email = "admin+acme@example.com";
+  services.nginx = {
+    enable = true;
+    virtualHosts = {
+      "foo.example.com" = {
+        forceSSL = true;
+        enableACME = true;
+        # All serverAliases will be added as extra domain names on the certificate.
+        serverAliases = [ "bar.example.com" ];
+        locations."/" = {
+          root = "/var/www";
+        };
       };
-    };
 
-    # We can also add a different vhost and reuse the same certificate
-    # but we have to append extraDomainNames manually beforehand:
-    # security.acme.certs."foo.example.com".extraDomainNames = [ "baz.example.com" ];
-    "baz.example.com" = {
-      forceSSL = true;
-      useACMEHost = "foo.example.com";
-      locations."/" = {
-        root = "/var/www";
+      # We can also add a different vhost and reuse the same certificate
+      # but we have to append extraDomainNames manually beforehand:
+      # security.acme.certs."foo.example.com".extraDomainNames = [ "baz.example.com" ];
+      "baz.example.com" = {
+        forceSSL = true;
+        useACMEHost = "foo.example.com";
+        locations."/" = {
+          root = "/var/www";
+        };
       };
     };
   };
-};
+}
 ```
 
 ## Using ACME certificates in Apache/httpd {#module-security-acme-httpd}
@@ -89,65 +91,69 @@ the intent that you will generate certs for all your vhosts and redirect
 everyone to HTTPS.
 
 ```nix
-security.acme.acceptTerms = true;
-security.acme.defaults.email = "admin+acme@example.com";
-
-# /var/lib/acme/.challenges must be writable by the ACME user
-# and readable by the Nginx user. The easiest way to achieve
-# this is to add the Nginx user to the ACME group.
-users.users.nginx.extraGroups = [ "acme" ];
-
-services.nginx = {
-  enable = true;
-  virtualHosts = {
-    "acmechallenge.example.com" = {
-      # Catchall vhost, will redirect users to HTTPS for all vhosts
-      serverAliases = [ "*.example.com" ];
-      locations."/.well-known/acme-challenge" = {
-        root = "/var/lib/acme/.challenges";
-      };
-      locations."/" = {
-        return = "301 https://$host$request_uri";
+{
+  security.acme.acceptTerms = true;
+  security.acme.defaults.email = "admin+acme@example.com";
+
+  # /var/lib/acme/.challenges must be writable by the ACME user
+  # and readable by the Nginx user. The easiest way to achieve
+  # this is to add the Nginx user to the ACME group.
+  users.users.nginx.extraGroups = [ "acme" ];
+
+  services.nginx = {
+    enable = true;
+    virtualHosts = {
+      "acmechallenge.example.com" = {
+        # Catchall vhost, will redirect users to HTTPS for all vhosts
+        serverAliases = [ "*.example.com" ];
+        locations."/.well-known/acme-challenge" = {
+          root = "/var/lib/acme/.challenges";
+        };
+        locations."/" = {
+          return = "301 https://$host$request_uri";
+        };
       };
     };
   };
-};
-# Alternative config for Apache
-users.users.wwwrun.extraGroups = [ "acme" ];
-services.httpd = {
-  enable = true;
-  virtualHosts = {
-    "acmechallenge.example.com" = {
-      # Catchall vhost, will redirect users to HTTPS for all vhosts
-      serverAliases = [ "*.example.com" ];
-      # /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user.
-      # By default, this is the case.
-      documentRoot = "/var/lib/acme/.challenges";
-      extraConfig = ''
-        RewriteEngine On
-        RewriteCond %{HTTPS} off
-        RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC]
-        RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301]
-      '';
+  # Alternative config for Apache
+  users.users.wwwrun.extraGroups = [ "acme" ];
+  services.httpd = {
+    enable = true;
+    virtualHosts = {
+      "acmechallenge.example.com" = {
+        # Catchall vhost, will redirect users to HTTPS for all vhosts
+        serverAliases = [ "*.example.com" ];
+        # /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user.
+        # By default, this is the case.
+        documentRoot = "/var/lib/acme/.challenges";
+        extraConfig = ''
+          RewriteEngine On
+          RewriteCond %{HTTPS} off
+          RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC]
+          RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301]
+        '';
+      };
     };
   };
-};
+}
 ```
 
 Now you need to configure ACME to generate a certificate.
 
 ```nix
-security.acme.certs."foo.example.com" = {
-  webroot = "/var/lib/acme/.challenges";
-  email = "foo@example.com";
-  # Ensure that the web server you use can read the generated certs
-  # Take a look at the group option for the web server you choose.
-  group = "nginx";
-  # Since we have a wildcard vhost to handle port 80,
-  # we can generate certs for anything!
-  # Just make sure your DNS resolves them.
-  extraDomainNames = [ "mail.example.com" ];
-};
+{
+  security.acme.certs."foo.example.com" = {
+    webroot = "/var/lib/acme/.challenges";
+    email = "foo@example.com";
+    # Ensure that the web server you use can read the generated certs
+    # Take a look at the group option for the web server you choose.
+    group = "nginx";
+    # Since we have a wildcard vhost to handle port 80,
+    # we can generate certs for anything!
+    # Just make sure your DNS resolves them.
+    extraDomainNames = [ "mail.example.com" ];
+  };
+}
 ```
 
 The private key {file}`key.pem` and certificate
@@ -168,31 +174,33 @@ for provider/server specific configuration values. For the sake of these
 docs, we will provide a fully self-hosted example using bind.
 
 ```nix
-services.bind = {
-  enable = true;
-  extraConfig = ''
-    include "/var/lib/secrets/dnskeys.conf";
-  '';
-  zones = [
-    rec {
-      name = "example.com";
-      file = "/var/db/bind/${name}";
-      master = true;
-      extraConfig = "allow-update { key rfc2136key.example.com.; };";
-    }
-  ];
-};
-
-# Now we can configure ACME
-security.acme.acceptTerms = true;
-security.acme.defaults.email = "admin+acme@example.com";
-security.acme.certs."example.com" = {
-  domain = "*.example.com";
-  dnsProvider = "rfc2136";
-  environmentFile = "/var/lib/secrets/certs.secret";
-  # We don't need to wait for propagation since this is a local DNS server
-  dnsPropagationCheck = false;
-};
+{
+  services.bind = {
+    enable = true;
+    extraConfig = ''
+      include "/var/lib/secrets/dnskeys.conf";
+    '';
+    zones = [
+      rec {
+        name = "example.com";
+        file = "/var/db/bind/${name}";
+        master = true;
+        extraConfig = "allow-update { key rfc2136key.example.com.; };";
+      }
+    ];
+  };
+
+  # Now we can configure ACME
+  security.acme.acceptTerms = true;
+  security.acme.defaults.email = "admin+acme@example.com";
+  security.acme.certs."example.com" = {
+    domain = "*.example.com";
+    dnsProvider = "rfc2136";
+    environmentFile = "/var/lib/secrets/certs.secret";
+    # We don't need to wait for propagation since this is a local DNS server
+    dnsPropagationCheck = false;
+  };
+}
 ```
 
 The {file}`dnskeys.conf` and {file}`certs.secret`
@@ -200,36 +208,38 @@ must be kept secure and thus you should not keep their contents in your
 Nix config. Instead, generate them one time with a systemd service:
 
 ```nix
-systemd.services.dns-rfc2136-conf = {
-  requiredBy = ["acme-example.com.service" "bind.service"];
-  before = ["acme-example.com.service" "bind.service"];
-  unitConfig = {
-    ConditionPathExists = "!/var/lib/secrets/dnskeys.conf";
-  };
-  serviceConfig = {
-    Type = "oneshot";
-    UMask = 0077;
+{
+  systemd.services.dns-rfc2136-conf = {
+    requiredBy = ["acme-example.com.service" "bind.service"];
+    before = ["acme-example.com.service" "bind.service"];
+    unitConfig = {
+      ConditionPathExists = "!/var/lib/secrets/dnskeys.conf";
+    };
+    serviceConfig = {
+      Type = "oneshot";
+      UMask = 0077;
+    };
+    path = [ pkgs.bind ];
+    script = ''
+      mkdir -p /var/lib/secrets
+      chmod 755 /var/lib/secrets
+      tsig-keygen rfc2136key.example.com > /var/lib/secrets/dnskeys.conf
+      chown named:root /var/lib/secrets/dnskeys.conf
+      chmod 400 /var/lib/secrets/dnskeys.conf
+
+      # extract secret value from the dnskeys.conf
+      while read x y; do if [ "$x" = "secret" ]; then secret="''${y:1:''${#y}-3}"; fi; done < /var/lib/secrets/dnskeys.conf
+
+      cat > /var/lib/secrets/certs.secret << EOF
+      RFC2136_NAMESERVER='127.0.0.1:53'
+      RFC2136_TSIG_ALGORITHM='hmac-sha256.'
+      RFC2136_TSIG_KEY='rfc2136key.example.com'
+      RFC2136_TSIG_SECRET='$secret'
+      EOF
+      chmod 400 /var/lib/secrets/certs.secret
+    '';
   };
-  path = [ pkgs.bind ];
-  script = ''
-    mkdir -p /var/lib/secrets
-    chmod 755 /var/lib/secrets
-    tsig-keygen rfc2136key.example.com > /var/lib/secrets/dnskeys.conf
-    chown named:root /var/lib/secrets/dnskeys.conf
-    chmod 400 /var/lib/secrets/dnskeys.conf
-
-    # extract secret value from the dnskeys.conf
-    while read x y; do if [ "$x" = "secret" ]; then secret="''${y:1:''${#y}-3}"; fi; done < /var/lib/secrets/dnskeys.conf
-
-    cat > /var/lib/secrets/certs.secret << EOF
-    RFC2136_NAMESERVER='127.0.0.1:53'
-    RFC2136_TSIG_ALGORITHM='hmac-sha256.'
-    RFC2136_TSIG_KEY='rfc2136key.example.com'
-    RFC2136_TSIG_SECRET='$secret'
-    EOF
-    chmod 400 /var/lib/secrets/certs.secret
-  '';
-};
+}
 ```
 
 Now you're all set to generate certs! You should monitor the first invocation
@@ -251,27 +261,29 @@ you will set them as defaults
 (e.g. [](#opt-security.acme.defaults.dnsProvider)).
 
 ```nix
-# Configure ACME appropriately
-security.acme.acceptTerms = true;
-security.acme.defaults.email = "admin+acme@example.com";
-security.acme.defaults = {
-  dnsProvider = "rfc2136";
-  environmentFile = "/var/lib/secrets/certs.secret";
-  # We don't need to wait for propagation since this is a local DNS server
-  dnsPropagationCheck = false;
-};
-
-# For each virtual host you would like to use DNS-01 validation with,
-# set acmeRoot = null
-services.nginx = {
-  enable = true;
-  virtualHosts = {
-    "foo.example.com" = {
-      enableACME = true;
-      acmeRoot = null;
+{
+  # Configure ACME appropriately
+  security.acme.acceptTerms = true;
+  security.acme.defaults.email = "admin+acme@example.com";
+  security.acme.defaults = {
+    dnsProvider = "rfc2136";
+    environmentFile = "/var/lib/secrets/certs.secret";
+    # We don't need to wait for propagation since this is a local DNS server
+    dnsPropagationCheck = false;
+  };
+
+  # For each virtual host you would like to use DNS-01 validation with,
+  # set acmeRoot = null
+  services.nginx = {
+    enable = true;
+    virtualHosts = {
+      "foo.example.com" = {
+        enableACME = true;
+        acmeRoot = null;
+      };
     };
   };
-};
+}
 ```
 
 And that's it! Next time your configuration is rebuilt, or when
@@ -288,39 +300,41 @@ Below is an example configuration for OpenSMTPD, but this pattern
 can be applied to any service.
 
 ```nix
-# Configure ACME however you like (DNS or HTTP validation), adding
-# the following configuration for the relevant certificate.
-# Note: You cannot use `systemctl reload` here as that would mean
-# the LoadCredential configuration below would be skipped and
-# the service would continue to use old certificates.
-security.acme.certs."mail.example.com".postRun = ''
-  systemctl restart opensmtpd
-'';
-
-# Now you must augment OpenSMTPD's systemd service to load
-# the certificate files.
-systemd.services.opensmtpd.requires = ["acme-finished-mail.example.com.target"];
-systemd.services.opensmtpd.serviceConfig.LoadCredential = let
-  certDir = config.security.acme.certs."mail.example.com".directory;
-in [
-  "cert.pem:${certDir}/cert.pem"
-  "key.pem:${certDir}/key.pem"
-];
-
-# Finally, configure OpenSMTPD to use these certs.
-services.opensmtpd = let
-  credsDir = "/run/credentials/opensmtpd.service";
-in {
-  enable = true;
-  setSendmail = false;
-  serverConfiguration = ''
-    pki mail.example.com cert "${credsDir}/cert.pem"
-    pki mail.example.com key "${credsDir}/key.pem"
-    listen on localhost tls pki mail.example.com
-    action act1 relay host smtp://127.0.0.1:10027
-    match for local action act1
+{
+  # Configure ACME however you like (DNS or HTTP validation), adding
+  # the following configuration for the relevant certificate.
+  # Note: You cannot use `systemctl reload` here as that would mean
+  # the LoadCredential configuration below would be skipped and
+  # the service would continue to use old certificates.
+  security.acme.certs."mail.example.com".postRun = ''
+    systemctl restart opensmtpd
   '';
-};
+
+  # Now you must augment OpenSMTPD's systemd service to load
+  # the certificate files.
+  systemd.services.opensmtpd.requires = ["acme-finished-mail.example.com.target"];
+  systemd.services.opensmtpd.serviceConfig.LoadCredential = let
+    certDir = config.security.acme.certs."mail.example.com".directory;
+  in [
+    "cert.pem:${certDir}/cert.pem"
+    "key.pem:${certDir}/key.pem"
+  ];
+
+  # Finally, configure OpenSMTPD to use these certs.
+  services.opensmtpd = let
+    credsDir = "/run/credentials/opensmtpd.service";
+  in {
+    enable = true;
+    setSendmail = false;
+    serverConfiguration = ''
+      pki mail.example.com cert "${credsDir}/cert.pem"
+      pki mail.example.com key "${credsDir}/key.pem"
+      listen on localhost tls pki mail.example.com
+      action act1 relay host smtp://127.0.0.1:10027
+      match for local action act1
+    '';
+  };
+}
 ```
 
 ## Regenerating certificates {#module-security-acme-regenerate}
diff --git a/nixpkgs/nixos/modules/services/audio/castopod.md b/nixpkgs/nixos/modules/services/audio/castopod.md
index ee8590737a7c..40838cc77aa6 100644
--- a/nixpkgs/nixos/modules/services/audio/castopod.md
+++ b/nixpkgs/nixos/modules/services/audio/castopod.md
@@ -7,16 +7,18 @@ Castopod is an open-source hosting platform made for podcasters who want to enga
 Use the following configuration to start a public instance of Castopod on `castopod.example.com` domain:
 
 ```nix
-networking.firewall.allowedTCPPorts = [ 80 443 ];
-services.castopod = {
-  enable = true;
-  database.createLocally = true;
-  nginx.virtualHost = {
-    serverName = "castopod.example.com";
-    enableACME = true;
-    forceSSL = true;
+{
+  networking.firewall.allowedTCPPorts = [ 80 443 ];
+  services.castopod = {
+    enable = true;
+    database.createLocally = true;
+    nginx.virtualHost = {
+      serverName = "castopod.example.com";
+      enableACME = true;
+      forceSSL = true;
+    };
   };
-};
+}
 ```
 
 Go to `https://castopod.example.com/cp-install` to create superadmin account after applying the above configuration.
diff --git a/nixpkgs/nixos/modules/services/audio/roon-server.nix b/nixpkgs/nixos/modules/services/audio/roon-server.nix
index 8691c08b0d36..8a6cf6ec6a41 100644
--- a/nixpkgs/nixos/modules/services/audio/roon-server.nix
+++ b/nixpkgs/nixos/modules/services/audio/roon-server.nix
@@ -9,6 +9,7 @@ in {
   options = {
     services.roon-server = {
       enable = mkEnableOption (lib.mdDoc "Roon Server");
+      package = lib.mkPackageOption pkgs "roon-server" { };
       openFirewall = mkOption {
         type = types.bool;
         default = false;
@@ -43,7 +44,7 @@ in {
       environment.ROON_ID_DIR = "/var/lib/${name}";
 
       serviceConfig = {
-        ExecStart = "${pkgs.roon-server}/bin/RoonServer";
+        ExecStart = "${lib.getExe cfg.package}";
         LimitNOFILE = 8192;
         User = cfg.user;
         Group = cfg.group;
diff --git a/nixpkgs/nixos/modules/services/backup/borgbackup.md b/nixpkgs/nixos/modules/services/backup/borgbackup.md
index 39141f6ec858..2c91174732e1 100644
--- a/nixpkgs/nixos/modules/services/backup/borgbackup.md
+++ b/nixpkgs/nixos/modules/services/backup/borgbackup.md
@@ -21,22 +21,21 @@ A complete list of options for the Borgbase module may be found
 ## Basic usage for a local backup {#opt-services-backup-borgbackup-local-directory}
 
 A very basic configuration for backing up to a locally accessible directory is:
-```
+```nix
 {
     opt.services.borgbackup.jobs = {
-      { rootBackup = {
-          paths = "/";
-          exclude = [ "/nix" "/path/to/local/repo" ];
-          repo = "/path/to/local/repo";
-          doInit = true;
-          encryption = {
-            mode = "repokey";
-            passphrase = "secret";
-          };
-          compression = "auto,lzma";
-          startAt = "weekly";
+      rootBackup = {
+        paths = "/";
+        exclude = [ "/nix" "/path/to/local/repo" ];
+        repo = "/path/to/local/repo";
+        doInit = true;
+        encryption = {
+          mode = "repokey";
+          passphrase = "secret";
         };
-      }
+        compression = "auto,lzma";
+        startAt = "weekly";
+      };
     };
 }
 ```
@@ -59,7 +58,7 @@ ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/
 ```
 
 Add the following snippet to your NixOS configuration:
-```
+```nix
 {
   services.borgbackup.repos = {
     my_borg_repo = {
@@ -80,7 +79,7 @@ that you have stored a secret passphrasse in the file
 {file}`/run/keys/borgbackup_passphrase`, which should be only
 accessible by root
 
-```
+```nix
 {
   services.borgbackup.jobs = {
     backupToLocalServer = {
@@ -96,7 +95,7 @@ accessible by root
       startAt = "hourly";
     };
   };
-};
+}
 ```
 
 The following few commands (run as root) let you test your backup.
diff --git a/nixpkgs/nixos/modules/services/databases/foundationdb.md b/nixpkgs/nixos/modules/services/databases/foundationdb.md
index 0815c139152f..9f7addc9c140 100644
--- a/nixpkgs/nixos/modules/services/databases/foundationdb.md
+++ b/nixpkgs/nixos/modules/services/databases/foundationdb.md
@@ -15,9 +15,11 @@ key-value store.
 
 To enable FoundationDB, add the following to your
 {file}`configuration.nix`:
-```
-services.foundationdb.enable = true;
-services.foundationdb.package = pkgs.foundationdb71; # FoundationDB 7.1.x
+```nix
+{
+  services.foundationdb.enable = true;
+  services.foundationdb.package = pkgs.foundationdb71; # FoundationDB 7.1.x
+}
 ```
 
 The {option}`services.foundationdb.package` option is required, and
@@ -109,8 +111,10 @@ default configuration. See below for more on scaling to increase this.
 FoundationDB stores all data for all server processes under
 {file}`/var/lib/foundationdb`. You can override this using
 {option}`services.foundationdb.dataDir`, e.g.
-```
-services.foundationdb.dataDir = "/data/fdb";
+```nix
+{
+  services.foundationdb.dataDir = "/data/fdb";
+}
 ```
 
 Similarly, logs are stored under {file}`/var/log/foundationdb`
@@ -265,8 +269,10 @@ directories.
 For example, to create backups in {command}`/opt/fdb-backups`, first
 set up the paths in the module options:
 
-```
-services.foundationdb.extraReadWritePaths = [ "/opt/fdb-backups" ];
+```nix
+{
+  services.foundationdb.extraReadWritePaths = [ "/opt/fdb-backups" ];
+}
 ```
 
 Restart the FoundationDB service, and it will now be able to write to this
diff --git a/nixpkgs/nixos/modules/services/databases/postgresql.md b/nixpkgs/nixos/modules/services/databases/postgresql.md
index 3ff1f00fa9cf..6cce8f542a53 100644
--- a/nixpkgs/nixos/modules/services/databases/postgresql.md
+++ b/nixpkgs/nixos/modules/services/databases/postgresql.md
@@ -15,9 +15,11 @@ PostgreSQL is an advanced, free relational database.
 ## Configuring {#module-services-postgres-configuring}
 
 To enable PostgreSQL, add the following to your {file}`configuration.nix`:
-```
-services.postgresql.enable = true;
-services.postgresql.package = pkgs.postgresql_15;
+```nix
+{
+  services.postgresql.enable = true;
+  services.postgresql.package = pkgs.postgresql_15;
+}
 ```
 Note that you are required to specify the desired version of PostgreSQL (e.g. `pkgs.postgresql_15`). Since upgrading your PostgreSQL version requires a database dump and reload (see below), NixOS cannot provide a default value for [](#opt-services.postgresql.package) such as the most recent release of PostgreSQL.
 
@@ -35,8 +37,10 @@ alice=>
 -->
 
 By default, PostgreSQL stores its databases in {file}`/var/lib/postgresql/$psqlSchema`. You can override this using [](#opt-services.postgresql.dataDir), e.g.
-```
-services.postgresql.dataDir = "/data/postgresql";
+```nix
+{
+  services.postgresql.dataDir = "/data/postgresql";
+}
 ```
 
 ## Initializing {#module-services-postgres-initializing}
@@ -95,16 +99,19 @@ databases from `ensureDatabases` and `extraUser1` from `ensureUsers`
 are already created.
 
 ```nix
+  {
     systemd.services.postgresql.postStart = lib.mkAfter ''
       $PSQL service1 -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
       $PSQL service1 -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
       # ....
     '';
+  }
 ```
 
 ##### in intermediate oneshot service {#module-services-postgres-initializing-extra-permissions-superuser-oneshot}
 
 ```nix
+  {
     systemd.services."migrate-service1-db1" = {
       serviceConfig.Type = "oneshot";
       requiredBy = "service1.service";
@@ -119,6 +126,7 @@ are already created.
         # ....
       '';
     };
+  }
 ```
 
 #### as service user {#module-services-postgres-initializing-extra-permissions-service-user}
@@ -130,6 +138,7 @@ are already created.
 ##### in service `preStart` {#module-services-postgres-initializing-extra-permissions-service-user-pre-start}
 
 ```nix
+  {
     environment.PSQL = "psql --port=${toString services.postgresql.port}";
     path = [ postgresql ];
     systemd.services."service1".preStart = ''
@@ -137,11 +146,13 @@ are already created.
       $PSQL -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
       # ....
     '';
+  }
 ```
 
 ##### in intermediate oneshot service {#module-services-postgres-initializing-extra-permissions-service-user-oneshot}
 
 ```nix
+  {
     systemd.services."migrate-service1-db1" = {
       serviceConfig.Type = "oneshot";
       requiredBy = "service1.service";
@@ -156,6 +167,7 @@ are already created.
         # ....
       '';
     };
+  }
 ```
 
 ## Upgrading {#module-services-postgres-upgrading}
@@ -174,7 +186,7 @@ $ nix-instantiate --eval -A postgresql_13.psqlSchema
 "13"
 ```
 For an upgrade, a script like this can be used to simplify the process:
-```
+```nix
 { config, pkgs, ... }:
 {
   environment.systemPackages = [
@@ -256,16 +268,18 @@ postgresql_15.pkgs.pg_partman        postgresql_15.pkgs.pgroonga
 ```
 
 To add plugins via NixOS configuration, set `services.postgresql.extraPlugins`:
-```
-services.postgresql.package = pkgs.postgresql_12;
-services.postgresql.extraPlugins = ps: with ps; [
-  pg_repack
-  postgis
-];
+```nix
+{
+  services.postgresql.package = pkgs.postgresql_12;
+  services.postgresql.extraPlugins = ps: with ps; [
+    pg_repack
+    postgis
+  ];
+}
 ```
 
 You can build custom PostgreSQL-with-plugins (to be used outside of NixOS) using function `.withPackages`. For example, creating a custom PostgreSQL package in an overlay can look like:
-```
+```nix
 self: super: {
   postgresql_custom = self.postgresql_12.withPackages (ps: [
     ps.pg_repack
@@ -275,7 +289,7 @@ self: super: {
 ```
 
 Here's a recipe on how to override a particular plugin through an overlay:
-```
+```nix
 self: super: {
   postgresql_15 = super.postgresql_15// {
     pkgs = super.postgresql_15.pkgs // {
diff --git a/nixpkgs/nixos/modules/services/databases/tigerbeetle.md b/nixpkgs/nixos/modules/services/databases/tigerbeetle.md
index 47394d443059..12d920e7bcc7 100644
--- a/nixpkgs/nixos/modules/services/databases/tigerbeetle.md
+++ b/nixpkgs/nixos/modules/services/databases/tigerbeetle.md
@@ -7,8 +7,10 @@
 TigerBeetle is a distributed financial accounting database designed for mission critical safety and performance.
 
 To enable TigerBeetle, add the following to your {file}`configuration.nix`:
-```
+```nix
+{
   services.tigerbeetle.enable = true;
+}
 ```
 
 When first started, the TigerBeetle service will create its data file at {file}`/var/lib/tigerbeetle` unless the file already exists, in which case it will just use the existing file.
@@ -20,13 +22,15 @@ By default, TigerBeetle will only listen on a local interface.
 To configure it to listen on a different interface (and to configure it to connect to other replicas, if you're creating more than one), you'll have to set the `addresses` option.
 Note that the TigerBeetle module won't open any firewall ports automatically, so if you configure it to listen on an external interface, you'll need to ensure that connections can reach it:
 
-```
+```nix
+{
   services.tigerbeetle = {
     enable = true;
     addresses = [ "0.0.0.0:3001" ];
   };
 
   networking.firewall.allowedTCPPorts = [ 3001 ];
+}
 ```
 
 A complete list of options for TigerBeetle can be found [here](#opt-services.tigerbeetle.enable).
diff --git a/nixpkgs/nixos/modules/services/desktops/flatpak.md b/nixpkgs/nixos/modules/services/desktops/flatpak.md
index af71d85b5a15..5299b32a03c7 100644
--- a/nixpkgs/nixos/modules/services/desktops/flatpak.md
+++ b/nixpkgs/nixos/modules/services/desktops/flatpak.md
@@ -8,17 +8,21 @@ Flatpak is a system for building, distributing, and running sandboxed desktop
 applications on Linux.
 
 To enable Flatpak, add the following to your {file}`configuration.nix`:
-```
+```nix
+{
   services.flatpak.enable = true;
+}
 ```
 
 For the sandboxed apps to work correctly, desktop integration portals need to
 be installed. If you run GNOME, this will be handled automatically for you;
 in other cases, you will need to add something like the following to your
 {file}`configuration.nix`:
-```
+```nix
+{
   xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
   xdg.portal.config.common.default = "gtk";
+}
 ```
 
 Then, you will need to add a repository, for example,
diff --git a/nixpkgs/nixos/modules/services/desktops/flatpak.nix b/nixpkgs/nixos/modules/services/desktops/flatpak.nix
index 4c26e6874023..62ef38a3d554 100644
--- a/nixpkgs/nixos/modules/services/desktops/flatpak.nix
+++ b/nixpkgs/nixos/modules/services/desktops/flatpak.nix
@@ -32,6 +32,8 @@ in {
 
     security.polkit.enable = true;
 
+    fonts.fontDir.enable = true;
+
     services.dbus.packages = [ pkgs.flatpak ];
 
     systemd.packages = [ pkgs.flatpak ];
diff --git a/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix b/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix
index 182615cd4d6c..5c6eba889ebe 100644
--- a/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix
+++ b/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix
@@ -1,14 +1,21 @@
 # PipeWire service.
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
+  inherit (builtins) attrNames concatMap length;
+  inherit (lib) maintainers teams;
+  inherit (lib.attrsets) attrByPath attrsToList concatMapAttrs filterAttrs;
+  inherit (lib.lists) flatten optional optionals;
+  inherit (lib.modules) mkIf mkRemovedOptionModule;
+  inherit (lib.options) literalExpression mkEnableOption mkOption mkPackageOption;
+  inherit (lib.strings) concatMapStringsSep hasPrefix optionalString;
+  inherit (lib.types) attrsOf bool listOf package;
+
   json = pkgs.formats.json {};
   mapToFiles = location: config: concatMapAttrs (name: value: { "share/pipewire/${location}.conf.d/${name}.conf" = json.generate "${name}" value; }) config;
   extraConfigPkgFromFiles = locations: filesSet: pkgs.runCommand "pipewire-extra-config" { } ''
-    mkdir -p ${lib.concatMapStringsSep " " (l: "$out/share/pipewire/${l}.conf.d") locations}
-    ${lib.concatMapStringsSep ";" ({name, value}: "ln -s ${value} $out/${name}") (lib.attrsToList filesSet)}
+    mkdir -p ${concatMapStringsSep " " (l: "$out/share/pipewire/${l}.conf.d") locations}
+    ${concatMapStringsSep ";" ({name, value}: "ln -s ${value} $out/${name}") (attrsToList filesSet)}
   '';
   cfg = config.services.pipewire;
   enable32BitAlsaPlugins = cfg.alsa.support32Bit
@@ -40,15 +47,15 @@ let
     name = "pipewire-configs";
     paths = configPackages
       ++ [ extraConfigPkg ]
-      ++ lib.optionals cfg.wireplumber.enable cfg.wireplumber.configPackages;
+      ++ optionals cfg.wireplumber.enable cfg.wireplumber.configPackages;
     pathsToLink = [ "/share/pipewire" ];
   };
 
-  requiredLv2Packages = lib.flatten
+  requiredLv2Packages = flatten
     (
-      lib.concatMap
+      concatMap
       (p:
-        lib.attrByPath ["passthru" "requiredLv2Packages"] [] p
+        attrByPath ["passthru" "requiredLv2Packages"] [] p
       )
       configPackages
     );
@@ -59,58 +66,58 @@ let
     pathsToLink = [ "/lib/lv2" ];
   };
 in {
-  meta.maintainers = teams.freedesktop.members ++ [ lib.maintainers.k900 ];
+  meta.maintainers = teams.freedesktop.members ++ [ maintainers.k900 ];
 
   ###### interface
   options = {
     services.pipewire = {
-      enable = mkEnableOption (lib.mdDoc "PipeWire service");
+      enable = mkEnableOption "PipeWire service";
 
       package = mkPackageOption pkgs "pipewire" { };
 
       socketActivation = mkOption {
         default = true;
-        type = types.bool;
-        description = lib.mdDoc ''
+        type = bool;
+        description = ''
           Automatically run PipeWire when connections are made to the PipeWire socket.
         '';
       };
 
       audio = {
-        enable = lib.mkOption {
-          type = lib.types.bool;
+        enable = mkOption {
+          type = bool;
           # this is for backwards compatibility
           default = cfg.alsa.enable || cfg.jack.enable || cfg.pulse.enable;
-          defaultText = lib.literalExpression "config.services.pipewire.alsa.enable || config.services.pipewire.jack.enable || config.services.pipewire.pulse.enable";
-          description = lib.mdDoc "Whether to use PipeWire as the primary sound server";
+          defaultText = literalExpression "config.services.pipewire.alsa.enable || config.services.pipewire.jack.enable || config.services.pipewire.pulse.enable";
+          description = "Whether to use PipeWire as the primary sound server";
         };
       };
 
       alsa = {
-        enable = mkEnableOption (lib.mdDoc "ALSA support");
-        support32Bit = mkEnableOption (lib.mdDoc "32-bit ALSA support on 64-bit systems");
+        enable = mkEnableOption "ALSA support";
+        support32Bit = mkEnableOption "32-bit ALSA support on 64-bit systems";
       };
 
       jack = {
-        enable = mkEnableOption (lib.mdDoc "JACK audio emulation");
+        enable = mkEnableOption "JACK audio emulation";
       };
 
       raopOpenFirewall = mkOption {
-        type = lib.types.bool;
+        type = bool;
         default = false;
-        description = lib.mdDoc ''
+        description = ''
           Opens UDP/6001-6002, required by RAOP/Airplay for timing and control data.
         '';
       };
 
       pulse = {
-        enable = mkEnableOption (lib.mdDoc "PulseAudio server emulation");
+        enable = mkEnableOption "PulseAudio server emulation";
       };
 
-      systemWide = lib.mkOption {
-        type = lib.types.bool;
+      systemWide = mkOption {
+        type = bool;
         default = false;
-        description = lib.mdDoc ''
+        description = ''
           If true, a system-wide PipeWire service and socket is enabled
           allowing all users in the "pipewire" group to use it simultaneously.
           If false, then user units are used instead, restricting access to
@@ -124,7 +131,7 @@ in {
 
       extraConfig = {
         pipewire = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-clock-rate" = {
@@ -138,7 +145,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire server.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire.conf.d`.
@@ -157,7 +164,7 @@ in {
           '';
         };
         client = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-no-resample" = {
@@ -166,7 +173,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire client library, used by most applications.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client.conf.d`.
@@ -177,7 +184,7 @@ in {
           '';
         };
         client-rt = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-alsa-linear-volume" = {
@@ -186,7 +193,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire client library, used by real-time applications and legacy ALSA clients.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client-rt.conf.d`.
@@ -198,7 +205,7 @@ in {
           '';
         };
         jack = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "20-hide-midi" = {
@@ -207,7 +214,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire JACK server and client library.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/jack.conf.d`.
@@ -218,7 +225,7 @@ in {
           '';
         };
         pipewire-pulse = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "15-force-s16-info" = {
@@ -232,7 +239,7 @@ in {
               }];
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire PulseAudio server.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire-pulse.conf.d`.
@@ -248,10 +255,32 @@ in {
         };
       };
 
-      configPackages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      configPackages = mkOption {
+        type = listOf package;
         default = [];
-        description = lib.mdDoc ''
+        example = literalExpression ''[
+          (pkgs.writeTextDir "share/pipewire/pipewire.conf.d/10-loopback.conf" '''
+            context.modules = [
+            {   name = libpipewire-module-loopback
+                args = {
+                  node.description = "Scarlett Focusrite Line 1"
+                  capture.props = {
+                      audio.position = [ FL ]
+                      stream.dont-remix = true
+                      node.target = "alsa_input.usb-Focusrite_Scarlett_Solo_USB_Y7ZD17C24495BC-00.analog-stereo"
+                      node.passive = true
+                  }
+                  playback.props = {
+                      node.name = "SF_mono_in_1"
+                      media.class = "Audio/Source"
+                      audio.position = [ MONO ]
+                  }
+                }
+            }
+            ]
+          ''')
+        ]'';
+        description = ''
           List of packages that provide PipeWire configuration, in the form of
           `share/pipewire/*/*.conf` files.
 
@@ -260,11 +289,11 @@ in {
         '';
       };
 
-      extraLv2Packages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      extraLv2Packages = mkOption {
+        type = listOf package;
         default = [];
-        example = lib.literalExpression "[ pkgs.lsp-plugins ]";
-        description = lib.mdDoc ''
+        example = literalExpression "[ pkgs.lsp-plugins ]";
+        description = ''
           List of packages that provide LV2 plugins in `lib/lv2` that should
           be made available to PipeWire for [filter chains][wiki-filter-chain].
 
@@ -279,11 +308,11 @@ in {
   };
 
   imports = [
-    (lib.mkRemovedOptionModule ["services" "pipewire" "config"] ''
+    (mkRemovedOptionModule ["services" "pipewire" "config"] ''
       Overriding default PipeWire configuration through NixOS options never worked correctly and is no longer supported.
       Please create drop-in configuration files via `services.pipewire.extraConfig` instead.
     '')
-    (lib.mkRemovedOptionModule ["services" "pipewire" "media-session"] ''
+    (mkRemovedOptionModule ["services" "pipewire" "media-session"] ''
       pipewire-media-session is no longer supported upstream and has been removed.
       Please switch to `services.pipewire.wireplumber` instead.
     '')
@@ -306,12 +335,12 @@ in {
         message = "Using PipeWire's ALSA/PulseAudio compatibility layers requires running PipeWire as the sound server. Set `services.pipewire.audio.enable` to true.";
       }
       {
-        assertion = builtins.length
-          (builtins.attrNames
+        assertion = length
+          (attrNames
             (
-              lib.filterAttrs
+              filterAttrs
                 (name: value:
-                  lib.hasPrefix "pipewire/" name || name == "pipewire"
+                  hasPrefix "pipewire/" name || name == "pipewire"
                 )
                 config.environment.etc
             )) == 1;
@@ -320,7 +349,7 @@ in {
     ];
 
     environment.systemPackages = [ cfg.package ]
-                                 ++ lib.optional cfg.jack.enable jack-libs;
+                                 ++ optional cfg.jack.enable jack-libs;
 
     systemd.packages = [ cfg.package ];
 
@@ -336,16 +365,16 @@ in {
     systemd.user.sockets.pipewire.enable = !cfg.systemWide;
     systemd.user.services.pipewire.enable = !cfg.systemWide;
 
-    systemd.services.pipewire.environment.LV2_PATH = lib.mkIf cfg.systemWide "${lv2Plugins}/lib/lv2";
-    systemd.user.services.pipewire.environment.LV2_PATH = lib.mkIf (!cfg.systemWide) "${lv2Plugins}/lib/lv2";
+    systemd.services.pipewire.environment.LV2_PATH = mkIf cfg.systemWide "${lv2Plugins}/lib/lv2";
+    systemd.user.services.pipewire.environment.LV2_PATH = mkIf (!cfg.systemWide) "${lv2Plugins}/lib/lv2";
 
     # Mask pw-pulse if it's not wanted
     systemd.user.services.pipewire-pulse.enable = cfg.pulse.enable;
     systemd.user.sockets.pipewire-pulse.enable = cfg.pulse.enable;
 
-    systemd.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
-    systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
-    systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.sockets.pipewire.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.user.sockets.pipewire.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.user.sockets.pipewire-pulse.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
 
     services.udev.packages = [ cfg.package ];
 
@@ -377,18 +406,18 @@ in {
     };
 
     environment.sessionVariables.LD_LIBRARY_PATH =
-      lib.mkIf cfg.jack.enable [ "${cfg.package.jack}/lib" ];
+      mkIf cfg.jack.enable [ "${cfg.package.jack}/lib" ];
 
-    networking.firewall.allowedUDPPorts = lib.mkIf cfg.raopOpenFirewall [ 6001 6002 ];
+    networking.firewall.allowedUDPPorts = mkIf cfg.raopOpenFirewall [ 6001 6002 ];
 
-    users = lib.mkIf cfg.systemWide {
+    users = mkIf cfg.systemWide {
       users.pipewire = {
         uid = config.ids.uids.pipewire;
         group = "pipewire";
         extraGroups = [
           "audio"
           "video"
-        ] ++ lib.optional config.security.rtkit.enable "rtkit";
+        ] ++ optional config.security.rtkit.enable "rtkit";
         description = "PipeWire system service user";
         isSystemUser = true;
         home = "/var/lib/pipewire";
diff --git a/nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix b/nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix
index de177d0e4ef3..6ab62eb03c25 100644
--- a/nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix
+++ b/nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix
@@ -1,46 +1,65 @@
 { config, lib, pkgs, ... }:
 
 let
+  inherit (builtins) attrNames concatMap length;
+  inherit (lib) maintainers;
+  inherit (lib.attrsets) attrByPath filterAttrs;
+  inherit (lib.lists) flatten optional;
+  inherit (lib.modules) mkIf;
+  inherit (lib.options) literalExpression mkOption;
+  inherit (lib.strings) hasPrefix;
+  inherit (lib.types) bool listOf package;
+
   pwCfg = config.services.pipewire;
   cfg = pwCfg.wireplumber;
   pwUsedForAudio = pwCfg.audio.enable;
 in
 {
-  meta.maintainers = [ lib.maintainers.k900 ];
+  meta.maintainers = [ maintainers.k900 ];
 
   options = {
     services.pipewire.wireplumber = {
-      enable = lib.mkOption {
-        type = lib.types.bool;
-        default = config.services.pipewire.enable;
-        defaultText = lib.literalExpression "config.services.pipewire.enable";
-        description = lib.mdDoc "Whether to enable WirePlumber, a modular session / policy manager for PipeWire";
+      enable = mkOption {
+        type = bool;
+        default = pwCfg.enable;
+        defaultText = literalExpression "config.services.pipewire.enable";
+        description = "Whether to enable WirePlumber, a modular session / policy manager for PipeWire";
       };
 
-      package = lib.mkOption {
-        type = lib.types.package;
+      package = mkOption {
+        type = package;
         default = pkgs.wireplumber;
-        defaultText = lib.literalExpression "pkgs.wireplumber";
-        description = lib.mdDoc "The WirePlumber derivation to use.";
+        defaultText = literalExpression "pkgs.wireplumber";
+        description = "The WirePlumber derivation to use.";
       };
 
-      configPackages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      configPackages = mkOption {
+        type = listOf package;
         default = [ ];
-        description = lib.mdDoc ''
+        example = literalExpression ''[
+          (pkgs.writeTextDir "share/wireplumber/wireplumber.conf.d/10-bluez.conf" '''
+            monitor.bluez.properties = {
+              bluez5.roles = [ a2dp_sink a2dp_source bap_sink bap_source hsp_hs hsp_ag hfp_hf hfp_ag ]
+              bluez5.codecs = [ sbc sbc_xq aac ]
+              bluez5.enable-sbc-xq = true
+              bluez5.hfphsp-backend = "native"
+            }
+          ''')
+        ]'';
+        description = ''
           List of packages that provide WirePlumber configuration, in the form of
-          `share/wireplumber/*/*.lua` files.
+          `share/wireplumber/*/*.conf` files.
 
           LV2 dependencies will be picked up from config packages automatically
           via `passthru.requiredLv2Packages`.
         '';
       };
 
-      extraLv2Packages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      extraLv2Packages = mkOption {
+        type = listOf package;
         default = [];
-        example = lib.literalExpression "[ pkgs.lsp-plugins ]";
-        description = lib.mdDoc ''
+        example = literalExpression "[ pkgs.lsp-plugins ]";
+        description = ''
           List of packages that provide LV2 plugins in `lib/lv2` that should
           be made available to WirePlumber for [filter chains][wiki-filter-chain].
 
@@ -78,8 +97,8 @@ in
       '';
 
       configPackages = cfg.configPackages
-          ++ lib.optional (!pwUsedForAudio) pwNotForAudioConfigPkg
-          ++ lib.optional config.services.pipewire.systemWide systemwideConfigPkg;
+          ++ optional (!pwUsedForAudio) pwNotForAudioConfigPkg
+          ++ optional pwCfg.systemWide systemwideConfigPkg;
 
       configs = pkgs.buildEnv {
         name = "wireplumber-configs";
@@ -87,11 +106,11 @@ in
         pathsToLink = [ "/share/wireplumber" ];
       };
 
-      requiredLv2Packages = lib.flatten
+      requiredLv2Packages = flatten
         (
-          lib.concatMap
+          concatMap
             (p:
-              lib.attrByPath ["passthru" "requiredLv2Packages"] [] p
+              attrByPath ["passthru" "requiredLv2Packages"] [] p
             )
             configPackages
         );
@@ -102,19 +121,19 @@ in
         pathsToLink = [ "/lib/lv2" ];
       };
     in
-    lib.mkIf cfg.enable {
+    mkIf cfg.enable {
       assertions = [
         {
           assertion = !config.hardware.bluetooth.hsphfpd.enable;
           message = "Using WirePlumber conflicts with hsphfpd, as it provides the same functionality. `hardware.bluetooth.hsphfpd.enable` needs be set to false";
         }
         {
-          assertion = builtins.length
-            (builtins.attrNames
+          assertion = length
+            (attrNames
               (
-                lib.filterAttrs
+                filterAttrs
                   (name: value:
-                    lib.hasPrefix "wireplumber/" name || name == "wireplumber"
+                    hasPrefix "wireplumber/" name || name == "wireplumber"
                   )
                   config.environment.etc
               )) == 1;
@@ -128,19 +147,19 @@ in
 
       systemd.packages = [ cfg.package ];
 
-      systemd.services.wireplumber.enable = config.services.pipewire.systemWide;
-      systemd.user.services.wireplumber.enable = !config.services.pipewire.systemWide;
+      systemd.services.wireplumber.enable = pwCfg.systemWide;
+      systemd.user.services.wireplumber.enable = !pwCfg.systemWide;
 
       systemd.services.wireplumber.wantedBy = [ "pipewire.service" ];
       systemd.user.services.wireplumber.wantedBy = [ "pipewire.service" ];
 
-      systemd.services.wireplumber.environment = lib.mkIf config.services.pipewire.systemWide {
+      systemd.services.wireplumber.environment = mkIf pwCfg.systemWide {
         # Force WirePlumber to use system dbus.
         DBUS_SESSION_BUS_ADDRESS = "unix:path=/run/dbus/system_bus_socket";
         LV2_PATH = "${lv2Plugins}/lib/lv2";
       };
 
       systemd.user.services.wireplumber.environment.LV2_PATH =
-        lib.mkIf (!config.services.pipewire.systemWide) "${lv2Plugins}/lib/lv2";
+        mkIf (!pwCfg.systemWide) "${lv2Plugins}/lib/lv2";
     };
 }
diff --git a/nixpkgs/nixos/modules/services/development/athens.md b/nixpkgs/nixos/modules/services/development/athens.md
index 77663db509d5..2795930b0a02 100644
--- a/nixpkgs/nixos/modules/services/development/athens.md
+++ b/nixpkgs/nixos/modules/services/development/athens.md
@@ -18,7 +18,7 @@ A complete list of options for the Athens module may be found
 ## Basic usage for a caching proxy configuration {#opt-services-development-athens-caching-proxy}
 
 A very basic configuration for Athens that acts as a caching and forwarding HTTP proxy is:
-```
+```nix
 {
     services.athens = {
       enable = true;
@@ -28,7 +28,7 @@ A very basic configuration for Athens that acts as a caching and forwarding HTTP
 
 If you want to prevent Athens from writing to disk, you can instead configure it to cache modules only in memory:
 
-```
+```nix
 {
     services.athens = {
       enable = true;
@@ -39,10 +39,10 @@ If you want to prevent Athens from writing to disk, you can instead configure it
 
 To use the local proxy in Go builds, you can set the proxy as environment variable:
 
-```
+```nix
 {
   environment.variables = {
-    GOPROXY = "http://localhost:3000"
+    GOPROXY = "http://localhost:3000";
   };
 }
 ```
diff --git a/nixpkgs/nixos/modules/services/development/blackfire.md b/nixpkgs/nixos/modules/services/development/blackfire.md
index e2e7e4780c79..5a7fbe68f7d2 100644
--- a/nixpkgs/nixos/modules/services/development/blackfire.md
+++ b/nixpkgs/nixos/modules/services/development/blackfire.md
@@ -7,7 +7,7 @@
 [Blackfire](https://blackfire.io) is a proprietary tool for profiling applications. There are several languages supported by the product but currently only PHP support is packaged in Nixpkgs. The back-end consists of a module that is loaded into the language runtime (called *probe*) and a service (*agent*) that the probe connects to and that sends the profiles to the server.
 
 To use it, you will need to enable the agent and the probe on your server. The exact method will depend on the way you use PHP but here is an example of NixOS configuration for PHP-FPM:
-```
+```nix
 let
   php = pkgs.php.withExtensions ({ enabled, all }: enabled ++ (with all; [
     blackfire
diff --git a/nixpkgs/nixos/modules/services/development/livebook.md b/nixpkgs/nixos/modules/services/development/livebook.md
index 5315f2c2755a..aac9c58d081c 100644
--- a/nixpkgs/nixos/modules/services/development/livebook.md
+++ b/nixpkgs/nixos/modules/services/development/livebook.md
@@ -9,7 +9,7 @@ Enabling the `livebook` service creates a user
 [`systemd`](https://www.freedesktop.org/wiki/Software/systemd/) unit
 which runs the server.
 
-```
+```nix
 { ... }:
 
 {
@@ -51,6 +51,8 @@ some features require additional packages.  For example, the machine
 learning Kinos require `gcc` and `gnumake`.  To add these, use
 `extraPackages`:
 
-```
-services.livebook.extraPackages = with pkgs; [ gcc gnumake ];
+```nix
+{
+  services.livebook.extraPackages = with pkgs; [ gcc gnumake ];
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/editors/emacs.md b/nixpkgs/nixos/modules/services/editors/emacs.md
index 02f47b098d86..885f927422bd 100644
--- a/nixpkgs/nixos/modules/services/editors/emacs.md
+++ b/nixpkgs/nixos/modules/services/editors/emacs.md
@@ -178,7 +178,7 @@ file {file}`configuration.nix` to make it contain:
 ::: {.example #module-services-emacs-configuration-nix}
 ### Custom Emacs in `configuration.nix`
 
-```
+```nix
 {
  environment.systemPackages = [
    # [...]
@@ -203,7 +203,7 @@ adding it to your {file}`~/.config/nixpkgs/config.nix` (see
 ::: {.example #module-services-emacs-config-nix}
 ### Custom Emacs in `~/.config/nixpkgs/config.nix`
 
-```
+```nix
 {
   packageOverrides = super: let self = super.pkgs; in {
     myemacs = import ./emacs.nix { pkgs = self; };
@@ -228,7 +228,7 @@ only use {command}`emacsclient`), you can change your file
 ::: {.example #ex-emacsGtk3Nix}
 ### Custom Emacs build
 
-```
+```nix
 { pkgs ? import <nixpkgs> {} }:
 let
   myEmacs = (pkgs.emacs.override {
@@ -242,7 +242,7 @@ let
       rm $out/share/applications/emacs.desktop
     '';
   });
-in [...]
+in [ /* ... */ ]
 ```
 :::
 
@@ -262,8 +262,10 @@ with the user's login session.
 
 To install and enable the {command}`systemd` user service for Emacs
 daemon, add the following to your {file}`configuration.nix`:
-```
-services.emacs.enable = true;
+```nix
+{
+  services.emacs.enable = true;
+}
 ```
 
 The {var}`services.emacs.package` option allows a custom
@@ -323,9 +325,11 @@ In general, {command}`systemd` user services are globally enabled
 by symlinks in {file}`/etc/systemd/user`. In the case where
 Emacs daemon is not wanted for all users, it is possible to install the
 service but not globally enable it:
-```
-services.emacs.enable = false;
-services.emacs.install = true;
+```nix
+{
+  services.emacs.enable = false;
+  services.emacs.install = true;
+}
 ```
 
 To enable the {command}`systemd` user service for just the
diff --git a/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix
index d29e0f542f55..a4f93221475d 100644
--- a/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix
+++ b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix
@@ -94,7 +94,7 @@ in
       { source = "${etcFiles}/etc/opt/brother/scanner/brscan5"; };
     environment.etc."opt/brother/scanner/models" =
       { source = "${etcFiles}/etc/opt/brother/scanner/brscan5/models"; };
-    environment.etc."sane.d/dll.d/brother5.conf".source = "${pkgs.brscan5}/etc/sane.d/dll.d/brother.conf";
+    environment.etc."sane.d/dll.d/brother5.conf".source = "${pkgs.brscan5}/etc/sane.d/dll.d/brother5.conf";
 
     assertions = [
       { assertion = all (x: !(null != x.ip && null != x.nodename)) netDeviceList;
diff --git a/nixpkgs/nixos/modules/services/mail/mailman.md b/nixpkgs/nixos/modules/services/mail/mailman.md
index 55b61f8a2582..446aa1f921b6 100644
--- a/nixpkgs/nixos/modules/services/mail/mailman.md
+++ b/nixpkgs/nixos/modules/services/mail/mailman.md
@@ -9,7 +9,7 @@ an existing, securely configured Postfix setup, as it does not automatically con
 ## Basic usage with Postfix {#module-services-mailman-basic-usage}
 
 For a basic configuration with Postfix as the MTA, the following settings are suggested:
-```
+```nix
 { config, ... }: {
   services.postfix = {
     enable = true;
@@ -50,7 +50,7 @@ necessary, but outside the scope of the Mailman module.
 ## Using with other MTAs {#module-services-mailman-other-mtas}
 
 Mailman also supports other MTAs, though with a little bit more configuration. For example, to use Mailman with Exim, you can use the following settings:
-```
+```nix
 { config, ... }: {
   services = {
     mailman = {
diff --git a/nixpkgs/nixos/modules/services/matrix/maubot.md b/nixpkgs/nixos/modules/services/matrix/maubot.md
index f6a05db56caf..d49066057a23 100644
--- a/nixpkgs/nixos/modules/services/matrix/maubot.md
+++ b/nixpkgs/nixos/modules/services/matrix/maubot.md
@@ -10,7 +10,9 @@ framework for Matrix.
 2. If you want to use PostgreSQL instead of SQLite, do this:
 
    ```nix
-   services.maubot.settings.database = "postgresql://maubot@localhost/maubot";
+   {
+     services.maubot.settings.database = "postgresql://maubot@localhost/maubot";
+   }
    ```
 
    If the PostgreSQL connection requires a password, you will have to
@@ -18,54 +20,58 @@ framework for Matrix.
 3. If you plan to expose your Maubot interface to the web, do something
    like this:
    ```nix
-   services.nginx.virtualHosts."matrix.example.org".locations = {
-     "/_matrix/maubot/" = {
-       proxyPass = "http://127.0.0.1:${toString config.services.maubot.settings.server.port}";
-       proxyWebsockets = true;
+   {
+     services.nginx.virtualHosts."matrix.example.org".locations = {
+       "/_matrix/maubot/" = {
+         proxyPass = "http://127.0.0.1:${toString config.services.maubot.settings.server.port}";
+         proxyWebsockets = true;
+       };
      };
-   };
-   services.maubot.settings.server.public_url = "matrix.example.org";
-   # do the following only if you want to use something other than /_matrix/maubot...
-   services.maubot.settings.server.ui_base_path = "/another/base/path";
+     services.maubot.settings.server.public_url = "matrix.example.org";
+     # do the following only if you want to use something other than /_matrix/maubot...
+     services.maubot.settings.server.ui_base_path = "/another/base/path";
+   }
    ```
 4. Optionally, set `services.maubot.pythonPackages` to a list of python3
    packages to make available for Maubot plugins.
 5. Optionally, set `services.maubot.plugins` to a list of Maubot
    plugins (full list available at https://plugins.maubot.xyz/):
    ```nix
-   services.maubot.plugins = with config.services.maubot.package.plugins; [
-     reactbot
-     # This will only change the default config! After you create a
-     # plugin instance, the default config will be copied into that
-     # instance's config in Maubot's database, and further base config
-     # changes won't affect the running plugin.
-     (rss.override {
-       base_config = {
-         update_interval = 60;
-         max_backoff = 7200;
-         spam_sleep = 2;
-         command_prefix = "rss";
-         admins = [ "@chayleaf:pavluk.org" ];
-       };
-     })
-   ];
-   # ...or...
-   services.maubot.plugins = config.services.maubot.package.plugins.allOfficialPlugins;
-   # ...or...
-   services.maubot.plugins = config.services.maubot.package.plugins.allPlugins;
-   # ...or...
-   services.maubot.plugins = with config.services.maubot.package.plugins; [
-     (weather.override {
-       # you can pass base_config as a string
-       base_config = ''
-         default_location: New York
-         default_units: M
-         default_language:
-         show_link: true
-         show_image: false
-       '';
-     })
-   ];
+   {
+     services.maubot.plugins = with config.services.maubot.package.plugins; [
+       reactbot
+       # This will only change the default config! After you create a
+       # plugin instance, the default config will be copied into that
+       # instance's config in Maubot's database, and further base config
+       # changes won't affect the running plugin.
+       (rss.override {
+         base_config = {
+           update_interval = 60;
+           max_backoff = 7200;
+           spam_sleep = 2;
+           command_prefix = "rss";
+           admins = [ "@chayleaf:pavluk.org" ];
+         };
+       })
+     ];
+     # ...or...
+     services.maubot.plugins = config.services.maubot.package.plugins.allOfficialPlugins;
+     # ...or...
+     services.maubot.plugins = config.services.maubot.package.plugins.allPlugins;
+     # ...or...
+     services.maubot.plugins = with config.services.maubot.package.plugins; [
+       (weather.override {
+         # you can pass base_config as a string
+         base_config = ''
+           default_location: New York
+           default_units: M
+           default_language:
+           show_link: true
+           show_image: false
+         '';
+       })
+     ];
+   }
    ```
 6. Start Maubot at least once before doing the following steps (it's
    necessary to generate the initial config).
diff --git a/nixpkgs/nixos/modules/services/matrix/mjolnir.md b/nixpkgs/nixos/modules/services/matrix/mjolnir.md
index f6994eeb8fa5..2594f05ce27b 100644
--- a/nixpkgs/nixos/modules/services/matrix/mjolnir.md
+++ b/nixpkgs/nixos/modules/services/matrix/mjolnir.md
@@ -46,7 +46,7 @@ autoconfigure a new Pantalaimon instance, which will connect to the homeserver
 set in [services.mjolnir.homeserverUrl](#opt-services.mjolnir.homeserverUrl) and Mjolnir itself
 will be configured to connect to the new Pantalaimon instance.
 
-```
+```nix
 {
   services.mjolnir = {
     enable = true;
@@ -78,7 +78,7 @@ uses across an entire homeserver.
 To use the Antispam Module, add `matrix-synapse-plugins.matrix-synapse-mjolnir-antispam`
 to the Synapse plugin list and enable the `mjolnir.Module` module.
 
-```
+```nix
 {
   services.matrix-synapse = {
     plugins = with pkgs; [
diff --git a/nixpkgs/nixos/modules/services/matrix/synapse.md b/nixpkgs/nixos/modules/services/matrix/synapse.md
index 9c9c025fc5f5..7f6587ce09df 100644
--- a/nixpkgs/nixos/modules/services/matrix/synapse.md
+++ b/nixpkgs/nixos/modules/services/matrix/synapse.md
@@ -23,7 +23,7 @@ synapse server for the `example.org` domain, served from
 the host `myhostname.example.org`. For more information,
 please refer to the
 [installation instructions of Synapse](https://element-hq.github.io/synapse/latest/setup/installation.html).
-```
+```nix
 { pkgs, lib, config, ... }:
 let
   fqdn = "${config.networking.hostName}.${config.networking.domain}";
@@ -158,7 +158,7 @@ in an additional file like this:
     by `matrix-synapse`.
   - Include the file like this in your configuration:
 
-    ```
+    ```nix
     {
       services.matrix-synapse.extraConfigFiles = [
         "/run/secrets/matrix-shared-secret"
@@ -190,7 +190,7 @@ fill in the required connection details automatically when you enter your
 Matrix Identifier. See
 [Try Matrix Now!](https://matrix.org/docs/projects/try-matrix-now.html)
 for a list of existing clients and their supported featureset.
-```
+```nix
 {
   services.nginx.virtualHosts."element.${fqdn}" = {
     enableACME = true;
diff --git a/nixpkgs/nixos/modules/services/misc/anki-sync-server.md b/nixpkgs/nixos/modules/services/misc/anki-sync-server.md
index 5d2b4da4d2fc..f58d3d8ad0da 100644
--- a/nixpkgs/nixos/modules/services/misc/anki-sync-server.md
+++ b/nixpkgs/nixos/modules/services/misc/anki-sync-server.md
@@ -16,7 +16,7 @@ unit which runs the sync server with an isolated user using the systemd
 `DynamicUser` option.
 
 This can be done by enabling the `anki-sync-server` service:
-```
+```nix
 { ... }:
 
 {
@@ -27,7 +27,7 @@ This can be done by enabling the `anki-sync-server` service:
 It is necessary to set at least one username-password pair under
 {option}`services.anki-sync-server.users`. For example
 
-```
+```nix
 {
   services.anki-sync-server.users = [
     {
@@ -50,7 +50,7 @@ you want to expose the sync server directly to other computers (not recommended
 in most circumstances, because the sync server doesn't use HTTPS), then set the
 following options:
 
-```
+```nix
 {
   services.anki-sync-server.host = "0.0.0.0";
   services.anki-sync-server.openFirewall = true;
diff --git a/nixpkgs/nixos/modules/services/misc/forgejo.md b/nixpkgs/nixos/modules/services/misc/forgejo.md
index 14b21933e6b0..f234ebf44aef 100644
--- a/nixpkgs/nixos/modules/services/misc/forgejo.md
+++ b/nixpkgs/nixos/modules/services/misc/forgejo.md
@@ -57,23 +57,25 @@ locations and database, instead of having to copy or rename them.
 Make sure to disable `services.gitea` when doing this.
 
 ```nix
-services.gitea.enable = false;
-
-services.forgejo = {
-  enable = true;
-  user = "gitea";
-  group = "gitea";
-  stateDir = "/var/lib/gitea";
-  database.name = "gitea";
-  database.user = "gitea";
-};
-
-users.users.gitea = {
-  home = "/var/lib/gitea";
-  useDefaultShell = true;
-  group = "gitea";
-  isSystemUser = true;
-};
-
-users.groups.gitea = {};
+{
+  services.gitea.enable = false;
+
+  services.forgejo = {
+    enable = true;
+    user = "gitea";
+    group = "gitea";
+    stateDir = "/var/lib/gitea";
+    database.name = "gitea";
+    database.user = "gitea";
+  };
+
+  users.users.gitea = {
+    home = "/var/lib/gitea";
+    useDefaultShell = true;
+    group = "gitea";
+    isSystemUser = true;
+  };
+
+  users.groups.gitea = {};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/misc/gitlab.md b/nixpkgs/nixos/modules/services/misc/gitlab.md
index 916b23584ed0..f7a5a8027489 100644
--- a/nixpkgs/nixos/modules/services/misc/gitlab.md
+++ b/nixpkgs/nixos/modules/services/misc/gitlab.md
@@ -10,19 +10,21 @@ configure a webserver to proxy HTTP requests to the socket.
 
 For instance, the following configuration could be used to use nginx as
 frontend proxy:
-```
-services.nginx = {
-  enable = true;
-  recommendedGzipSettings = true;
-  recommendedOptimisation = true;
-  recommendedProxySettings = true;
-  recommendedTlsSettings = true;
-  virtualHosts."git.example.com" = {
-    enableACME = true;
-    forceSSL = true;
-    locations."/".proxyPass = "http://unix:/run/gitlab/gitlab-workhorse.socket";
+```nix
+{
+  services.nginx = {
+    enable = true;
+    recommendedGzipSettings = true;
+    recommendedOptimisation = true;
+    recommendedProxySettings = true;
+    recommendedTlsSettings = true;
+    virtualHosts."git.example.com" = {
+      enableACME = true;
+      forceSSL = true;
+      locations."/".proxyPass = "http://unix:/run/gitlab/gitlab-workhorse.socket";
+    };
   };
-};
+}
 ```
 
 ## Configuring {#module-services-gitlab-configuring}
@@ -35,36 +37,38 @@ The default state dir is `/var/gitlab/state`. This is where
 all data like the repositories and uploads will be stored.
 
 A basic configuration with some custom settings could look like this:
-```
-services.gitlab = {
-  enable = true;
-  databasePasswordFile = "/var/keys/gitlab/db_password";
-  initialRootPasswordFile = "/var/keys/gitlab/root_password";
-  https = true;
-  host = "git.example.com";
-  port = 443;
-  user = "git";
-  group = "git";
-  smtp = {
+```nix
+{
+  services.gitlab = {
     enable = true;
-    address = "localhost";
-    port = 25;
-  };
-  secrets = {
-    dbFile = "/var/keys/gitlab/db";
-    secretFile = "/var/keys/gitlab/secret";
-    otpFile = "/var/keys/gitlab/otp";
-    jwsFile = "/var/keys/gitlab/jws";
-  };
-  extraConfig = {
-    gitlab = {
-      email_from = "gitlab-no-reply@example.com";
-      email_display_name = "Example GitLab";
-      email_reply_to = "gitlab-no-reply@example.com";
-      default_projects_features = { builds = false; };
+    databasePasswordFile = "/var/keys/gitlab/db_password";
+    initialRootPasswordFile = "/var/keys/gitlab/root_password";
+    https = true;
+    host = "git.example.com";
+    port = 443;
+    user = "git";
+    group = "git";
+    smtp = {
+      enable = true;
+      address = "localhost";
+      port = 25;
+    };
+    secrets = {
+      dbFile = "/var/keys/gitlab/db";
+      secretFile = "/var/keys/gitlab/secret";
+      otpFile = "/var/keys/gitlab/otp";
+      jwsFile = "/var/keys/gitlab/jws";
+    };
+    extraConfig = {
+      gitlab = {
+        email_from = "gitlab-no-reply@example.com";
+        email_display_name = "Example GitLab";
+        email_reply_to = "gitlab-no-reply@example.com";
+        default_projects_features = { builds = false; };
+      };
     };
   };
-};
+}
 ```
 
 If you're setting up a new GitLab instance, generate new
diff --git a/nixpkgs/nixos/modules/services/misc/mollysocket.nix b/nixpkgs/nixos/modules/services/misc/mollysocket.nix
new file mode 100644
index 000000000000..f40caa4a782e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/mollysocket.nix
@@ -0,0 +1,133 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) getExe mkIf mkOption mkEnableOption optionals types;
+
+  cfg = config.services.mollysocket;
+  configuration = format.generate "mollysocket.conf" cfg.settings;
+  format = pkgs.formats.toml { };
+  package = pkgs.writeShellScriptBin "mollysocket" ''
+    MOLLY_CONF=${configuration} exec ${getExe pkgs.mollysocket} "$@"
+  '';
+in {
+  options.services.mollysocket = {
+    enable = mkEnableOption ''
+      [MollySocket](https://github.com/mollyim/mollysocket) for getting Signal
+      notifications via UnifiedPush
+    '';
+
+    settings = mkOption {
+      default = { };
+      description = ''
+        Configuration for MollySocket. Available options are listed
+        [here](https://github.com/mollyim/mollysocket#configuration).
+      '';
+      type = types.submodule {
+        freeformType = format.type;
+        options = {
+          host = mkOption {
+            default = "127.0.0.1";
+            description = "Listening address of the web server";
+            type = types.str;
+          };
+
+          port = mkOption {
+            default = 8020;
+            description = "Listening port of the web server";
+            type = types.port;
+          };
+
+          allowed_endpoints = mkOption {
+            default = [ "*" ];
+            description = "List of UnifiedPush servers";
+            example = [ "https://ntfy.sh" ];
+            type = with types; listOf str;
+          };
+
+          allowed_uuids = mkOption {
+            default = [ "*" ];
+            description = "UUIDs of Signal accounts that may use this server";
+            example = [ "abcdef-12345-tuxyz-67890" ];
+            type = with types; listOf str;
+          };
+        };
+      };
+    };
+
+    environmentFile = mkOption {
+      default = null;
+      description = ''
+        Environment file (see {manpage}`systemd.exec(5)` "EnvironmentFile="
+        section for the syntax) passed to the service. This option can be
+        used to safely include secrets in the configuration.
+      '';
+      example = "/run/secrets/mollysocket";
+      type = with types; nullOr path;
+    };
+
+    logLevel = mkOption {
+      default = "info";
+      description = "Set the {env}`RUST_LOG` environment variable";
+      example = "debug";
+      type = types.str;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [
+      package
+    ];
+
+    # see https://github.com/mollyim/mollysocket/blob/main/mollysocket.service
+    systemd.services.mollysocket = {
+      description = "MollySocket";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      environment.RUST_LOG = cfg.logLevel;
+      serviceConfig = let
+        capabilities = [ "" ] ++ optionals (cfg.settings.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
+      in {
+        EnvironmentFile = cfg.environmentFile;
+        ExecStart = "${getExe package} server";
+        KillSignal = "SIGINT";
+        Restart = "on-failure";
+        StateDirectory = "mollysocket";
+        TimeoutStopSec = 5;
+        WorkingDirectory = "/var/lib/mollysocket";
+
+        # hardening
+        AmbientCapabilities = capabilities;
+        CapabilityBoundingSet = capabilities;
+        DevicePolicy = "closed";
+        DynamicUser = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@resources" "~@privileged" ];
+        UMask = "0077";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ dotlambda ];
+}
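
For reference, a hedged usage sketch of the new MollySocket module, composed only from the options declared above; the concrete values are illustrative:

```nix
{
  services.mollysocket = {
    enable = true;
    settings = {
      host = "127.0.0.1";
      port = 8020;
      allowed_endpoints = [ "https://ntfy.sh" ];
      allowed_uuids = [ "*" ];
    };
    # Keeps secrets out of the world-readable Nix store (see the environmentFile option).
    environmentFile = "/run/secrets/mollysocket";
    logLevel = "info";
  };
}
```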
diff --git a/nixpkgs/nixos/modules/services/misc/paperless.nix b/nixpkgs/nixos/modules/services/misc/paperless.nix
index 9314c4f3848d..9301d1f68725 100644
--- a/nixpkgs/nixos/modules/services/misc/paperless.nix
+++ b/nixpkgs/nixos/modules/services/misc/paperless.nix
@@ -27,6 +27,8 @@ let
       name = "paperless_ngx_nltk_data";
       paths = pkg.nltkData;
     };
+  } // optionalAttrs (cfg.openMPThreadingWorkaround) {
+    OMP_NUM_THREADS = "1";
   } // (lib.mapAttrs (_: s:
     if (lib.isAttrs s || lib.isList s) then builtins.toJSON s
     else if lib.isBool s then lib.boolToString s
@@ -199,6 +201,20 @@ in
     };
 
     package = mkPackageOption pkgs "paperless-ngx" { };
+
+    openMPThreadingWorkaround = mkEnableOption ''
+      a workaround for document classifier timeouts.
+
+      Paperless uses OpenBLAS via scikit-learn for document classification.
+
+      The default is to use threading for OpenMP but this would cause the
+      document classifier to spin on one core seemingly indefinitely if there
+      are large numbers of classes per classification, causing it to
+      effectively never complete due to running into timeouts.
+
+      This sets `OMP_NUM_THREADS` to `1` in order to mitigate the issue. See
+      https://github.com/NixOS/nixpkgs/issues/240591 for more information.
+    '' // mkOption { default = true; };
   };
 
   config = mkIf cfg.enable {
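
Since the workaround defaults to `true`, users who prefer OpenMP threading can opt out explicitly; a minimal sketch:

```nix
{
  services.paperless = {
    enable = true;
    # Opt back in to OpenMP threading for OpenBLAS; only sensible if the classifier
    # timeouts described in https://github.com/NixOS/nixpkgs/issues/240591 are not a concern.
    openMPThreadingWorkaround = false;
  };
}
```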
diff --git a/nixpkgs/nixos/modules/services/misc/sourcehut/default.md b/nixpkgs/nixos/modules/services/misc/sourcehut/default.md
index 44d58aa0bef3..f965c395038a 100644
--- a/nixpkgs/nixos/modules/services/misc/sourcehut/default.md
+++ b/nixpkgs/nixos/modules/services/misc/sourcehut/default.md
@@ -12,7 +12,7 @@ This NixOS module also provides basic configuration integrating Sourcehut into l
 and `services.postgresql` services.
 
 A very basic configuration may look like this:
-```
+```nix
 { pkgs, ... }:
 let
   fqdn =
@@ -66,9 +66,9 @@ in {
     # Settings to setup what certificates are used for which endpoint.
     virtualHosts = {
       "${fqdn}".enableACME = true;
-      "meta.${fqdn}".useACMEHost = fqdn:
-      "man.${fqdn}".useACMEHost = fqdn:
-      "git.${fqdn}".useACMEHost = fqdn:
+      "meta.${fqdn}".useACMEHost = fqdn;
+      "man.${fqdn}".useACMEHost = fqdn;
+      "git.${fqdn}".useACMEHost = fqdn;
     };
   };
 }
diff --git a/nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix b/nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix
index a8300ecd5233..1b1fde78ad0a 100644
--- a/nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix
+++ b/nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix
@@ -20,7 +20,10 @@ let
   manage = pkgs.writeShellScript "manage" ''
     set -o allexport # Export the following env vars
     ${lib.toShellVars env}
-    exec ${pkg}/bin/tandoor-recipes "$@"
+    eval "$(${config.systemd.package}/bin/systemctl show -pUID,GID,MainPID tandoor-recipes.service)"
+    exec ${pkgs.util-linux}/bin/nsenter \
+      -t $MainPID -m -S $UID -G $GID \
+      ${pkg}/bin/tandoor-recipes "$@"
   '';
 in
 {
@@ -82,6 +85,7 @@ in
         Restart = "on-failure";
 
         User = "tandoor_recipes";
+        Group = "tandoor_recipes";
         DynamicUser = true;
         StateDirectory = "tandoor-recipes";
         WorkingDirectory = "/var/lib/tandoor-recipes";
diff --git a/nixpkgs/nixos/modules/services/misc/weechat.md b/nixpkgs/nixos/modules/services/misc/weechat.md
index 21f41be5b4a0..fb20ebe1e4db 100644
--- a/nixpkgs/nixos/modules/services/misc/weechat.md
+++ b/nixpkgs/nixos/modules/services/misc/weechat.md
@@ -12,7 +12,7 @@ unit which runs the chat client in a detached
 session.
 
 This can be done by enabling the `weechat` service:
-```
+```nix
 { ... }:
 
 {
 allow another user to attach to this session, the
 `screenrc` needs to be tweaked by adding
 [multiuser](https://www.gnu.org/software/screen/manual/html_node/Multiuser.html#Multiuser)
 support:
-```
+```nix
 {
   programs.screen.screenrc = ''
     multiuser on
diff --git a/nixpkgs/nixos/modules/services/monitoring/certspotter.md b/nixpkgs/nixos/modules/services/monitoring/certspotter.md
index 9bf6e1d946a0..e999bfe65ec3 100644
--- a/nixpkgs/nixos/modules/services/monitoring/certspotter.md
+++ b/nixpkgs/nixos/modules/services/monitoring/certspotter.md
@@ -9,17 +9,19 @@ A basic config that notifies you of all certificate changes for your
 domain would look as follows:
 
 ```nix
-services.certspotter = {
-  enable = true;
-  # replace example.org with your domain name
-  watchlist = [ ".example.org" ];
-  emailRecipients = [ "webmaster@example.org" ];
-};
+{
+  services.certspotter = {
+    enable = true;
+    # replace example.org with your domain name
+    watchlist = [ ".example.org" ];
+    emailRecipients = [ "webmaster@example.org" ];
+  };
 
-# Configure an SMTP client
-programs.msmtp.enable = true;
-# Or you can use any other module that provides sendmail, like
-# services.nullmailer, services.opensmtpd, services.postfix
+  # Configure an SMTP client
+  programs.msmtp.enable = true;
+  # Or you can use any other module that provides sendmail, like
+  # services.nullmailer, services.opensmtpd, services.postfix
+}
 ```
 
 In this case, the leading dot in `".example.org"` means that Cert
@@ -59,16 +61,18 @@ For example, you can remove `emailRecipients` and send email
 notifications manually using the following hook:
 
 ```nix
-services.certspotter.hooks = [
-  (pkgs.writeShellScript "certspotter-hook" ''
-    function print_email() {
-      echo "Subject: [certspotter] $SUMMARY"
-      echo "Mime-Version: 1.0"
-      echo "Content-Type: text/plain; charset=US-ASCII"
-      echo
-      cat "$TEXT_FILENAME"
-    }
-    print_email | ${config.services.certspotter.sendmailPath} -i webmaster@example.org
-  '')
-];
+{
+  services.certspotter.hooks = [
+    (pkgs.writeShellScript "certspotter-hook" ''
+      function print_email() {
+        echo "Subject: [certspotter] $SUMMARY"
+        echo "Mime-Version: 1.0"
+        echo "Content-Type: text/plain; charset=US-ASCII"
+        echo
+        cat "$TEXT_FILENAME"
+      }
+      print_email | ${config.services.certspotter.sendmailPath} -i webmaster@example.org
+    '')
+  ];
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/monitoring/goss.md b/nixpkgs/nixos/modules/services/monitoring/goss.md
index 1e636aa3bdf3..bf91d42011fa 100644
--- a/nixpkgs/nixos/modules/services/monitoring/goss.md
+++ b/nixpkgs/nixos/modules/services/monitoring/goss.md
@@ -7,7 +7,7 @@ for validating a server's configuration.
 
 A minimal configuration looks like this:
 
-```
+```nix
 {
   services.goss = {
     enable = true;
diff --git a/nixpkgs/nixos/modules/services/monitoring/parsedmarc.md b/nixpkgs/nixos/modules/services/monitoring/parsedmarc.md
index eac07e0cc9fe..765846bbbaf3 100644
--- a/nixpkgs/nixos/modules/services/monitoring/parsedmarc.md
+++ b/nixpkgs/nixos/modules/services/monitoring/parsedmarc.md
@@ -11,15 +11,17 @@ email address and saves them to a local Elasticsearch instance looks
 like this:
 
 ```nix
-services.parsedmarc = {
-  enable = true;
-  settings.imap = {
-    host = "imap.example.com";
-    user = "alice@example.com";
-    password = "/path/to/imap_password_file";
+{
+  services.parsedmarc = {
+    enable = true;
+    settings.imap = {
+      host = "imap.example.com";
+      user = "alice@example.com";
+      password = "/path/to/imap_password_file";
+    };
+    provision.geoIp = false; # Not recommended!
   };
-  provision.geoIp = false; # Not recommended!
-};
+}
 ```
 
 Note that GeoIP provisioning is disabled in the example for
@@ -37,16 +39,18 @@ configured in the domain's dmarc policy is
 `dmarc@monitoring.example.com`.
 
 ```nix
-services.parsedmarc = {
-  enable = true;
-  provision = {
-    localMail = {
-      enable = true;
-      hostname = monitoring.example.com;
+{
+  services.parsedmarc = {
+    enable = true;
+    provision = {
+      localMail = {
+        enable = true;
+        hostname = "monitoring.example.com";
+      };
+      geoIp = false; # Not recommended!
     };
-    geoIp = false; # Not recommended!
   };
-};
+}
 ```
 
 ## Grafana and GeoIP {#module-services-parsedmarc-grafana-geoip}
@@ -58,55 +62,57 @@ is automatically added as a Grafana datasource, and the dashboard is
 added to Grafana as well.
 
 ```nix
-services.parsedmarc = {
-  enable = true;
-  provision = {
-    localMail = {
-      enable = true;
-      hostname = url;
-    };
-    grafana = {
-      datasource = true;
-      dashboard = true;
+{
+  services.parsedmarc = {
+    enable = true;
+    provision = {
+      localMail = {
+        enable = true;
+        hostname = url;
+      };
+      grafana = {
+        datasource = true;
+        dashboard = true;
+      };
     };
   };
-};
 
-# Not required, but recommended for full functionality
-services.geoipupdate = {
-  settings = {
-    AccountID = 000000;
-    LicenseKey = "/path/to/license_key_file";
+  # Not required, but recommended for full functionality
+  services.geoipupdate = {
+    settings = {
+      AccountID = 000000;
+      LicenseKey = "/path/to/license_key_file";
+    };
   };
-};
 
-services.grafana = {
-  enable = true;
-  addr = "0.0.0.0";
-  domain = url;
-  rootUrl = "https://" + url;
-  protocol = "socket";
-  security = {
-    adminUser = "admin";
-    adminPasswordFile = "/path/to/admin_password_file";
-    secretKeyFile = "/path/to/secret_key_file";
+  services.grafana = {
+    enable = true;
+    addr = "0.0.0.0";
+    domain = url;
+    rootUrl = "https://" + url;
+    protocol = "socket";
+    security = {
+      adminUser = "admin";
+      adminPasswordFile = "/path/to/admin_password_file";
+      secretKeyFile = "/path/to/secret_key_file";
+    };
   };
-};
 
-services.nginx = {
-  enable = true;
-  recommendedTlsSettings = true;
-  recommendedOptimisation = true;
-  recommendedGzipSettings = true;
-  recommendedProxySettings = true;
-  upstreams.grafana.servers."unix:/${config.services.grafana.socket}" = {};
-  virtualHosts.${url} = {
-    root = config.services.grafana.staticRootPath;
-    enableACME = true;
-    forceSSL = true;
-    locations."/".tryFiles = "$uri @grafana";
-    locations."@grafana".proxyPass = "http://grafana";
+  services.nginx = {
+    enable = true;
+    recommendedTlsSettings = true;
+    recommendedOptimisation = true;
+    recommendedGzipSettings = true;
+    recommendedProxySettings = true;
+    upstreams.grafana.servers."unix:/${config.services.grafana.socket}" = {};
+    virtualHosts.${url} = {
+      root = config.services.grafana.staticRootPath;
+      enableACME = true;
+      forceSSL = true;
+      locations."/".tryFiles = "$uri @grafana";
+      locations."@grafana".proxyPass = "http://grafana";
+    };
   };
-};
-users.users.nginx.extraGroups = [ "grafana" ];
+  users.users.nginx.extraGroups = [ "grafana" ];
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md
index 34fadecadc74..b344534f6aee 100644
--- a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md
@@ -9,7 +9,8 @@ One of the most common exporters is the
 [node exporter](https://github.com/prometheus/node_exporter),
 which provides hardware and OS metrics from the host it's
 running on. The exporter could be configured as follows:
-```
+```nix
+{
   services.prometheus.exporters.node = {
     enable = true;
     port = 9100;
@@ -23,6 +24,7 @@ running on. The exporter could be configured as follows:
     openFirewall = true;
     firewallFilter = "-i br0 -p tcp -m tcp --dport 9100";
   };
+}
 ```
 It should now serve all metrics from the collectors that are explicitly
 enabled and the ones that are
@@ -35,7 +37,8 @@ configuration see `man configuration.nix` or search through
 the [available options](https://nixos.org/nixos/options.html#prometheus.exporters).
 
 Prometheus can now be configured to consume the metrics produced by the exporter:
-```
+```nix
+{
     services.prometheus = {
       # ...
 
@@ -49,7 +52,8 @@ Prometheus can now be configured to consume the metrics produced by the exporter
       ];
 
       # ...
-    }
+    };
+}
 ```
 
 ## Adding a new exporter {#module-services-prometheus-exporters-new-exporter}
@@ -75,7 +79,7 @@ example:
     `nixos/modules/services/monitoring/prometheus/exporters/`
     directory, which will be called postfix.nix and contains all exporter
     specific options and configuration:
-    ```
+    ```nix
     # nixpkgs/nixos/modules/services/prometheus/exporters/postfix.nix
     { config, lib, pkgs, options }:
 
@@ -148,7 +152,7 @@ example:
 Should an exporter option change at some point, it is possible to add
 information about the change to the exporter definition similar to
 `nixpkgs/nixos/modules/rename.nix`:
-```
+```nix
 { config, lib, pkgs, options }:
 
 with lib;
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md b/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md
index 8d8486507b77..626d69df84a5 100644
--- a/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md
+++ b/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md
@@ -8,7 +8,7 @@ replication tool for SQLite.
 The Litestream service is managed by a dedicated user named `litestream`,
 which needs permission to access the database file. Here's an example config which gives
 required permissions to access [grafana database](#opt-services.grafana.settings.database.path):
-```
+```nix
 { pkgs, ... }:
 {
   users.users.litestream.extraGroups = [ "grafana" ];
diff --git a/nixpkgs/nixos/modules/services/networking/dnsproxy.nix b/nixpkgs/nixos/modules/services/networking/dnsproxy.nix
new file mode 100644
index 000000000000..f0be74d7591f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/dnsproxy.nix
@@ -0,0 +1,106 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    escapeShellArgs
+    getExe
+    lists
+    literalExpression
+    maintainers
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOption
+    types;
+
+  cfg = config.services.dnsproxy;
+
+  yaml = pkgs.formats.yaml { };
+  configFile = yaml.generate "config.yaml" cfg.settings;
+
+  finalFlags = (lists.optional (cfg.settings != { }) "--config-path=${configFile}") ++ cfg.flags;
+in
+{
+
+  options.services.dnsproxy = {
+
+    enable = mkEnableOption (lib.mdDoc "dnsproxy");
+
+    package = mkPackageOption pkgs "dnsproxy" { };
+
+    settings = mkOption {
+      type = yaml.type;
+      default = { };
+      example = literalExpression ''
+        {
+          bootstrap = [
+            "8.8.8.8:53"
+          ];
+          listen-addrs = [
+            "0.0.0.0"
+          ];
+          listen-ports = [
+            53
+          ];
+          upstream = [
+            "1.1.1.1:53"
+          ];
+        }
+      '';
+      description = mdDoc ''
+        Contents of the `config.yaml` config file.
+        The `--config-path` argument will only be passed if this set is not empty.
+
+        See <https://github.com/AdguardTeam/dnsproxy/blob/master/config.yaml.dist>.
+      '';
+    };
+
+    flags = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--upstream=1.1.1.1:53" ];
+      description = lib.mdDoc ''
+        A list of extra command-line flags to pass to dnsproxy. For details on the
+        available options, see <https://github.com/AdguardTeam/dnsproxy#usage>.
+        Keep in mind that options passed through command-line flags override
+        config options.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.dnsproxy = {
+      description = "Simple DNS proxy with DoH, DoT, DoQ and DNSCrypt support";
+      after = [ "network.target" "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${getExe cfg.package} ${escapeShellArgs finalFlags}";
+        Restart = "always";
+        RestartSec = 10;
+        DynamicUser = true;
+
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        ProtectClock = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [ "@system-service" "~@privileged @resources" ];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ diogotcorreia ];
+
+}
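
A hedged usage sketch combining the `settings` and `flags` options declared above; the addresses are taken from the module's own examples and are illustrative only:

```nix
{
  services.dnsproxy = {
    enable = true;
    settings = {
      bootstrap = [ "8.8.8.8:53" ];
      listen-addrs = [ "0.0.0.0" ];
      listen-ports = [ 53 ];
      upstream = [ "1.1.1.1:53" ];
    };
    # Flags are appended after --config-path; per the option description they
    # override settings from the config file.
    flags = [ "--verbose" ];
  };
}
```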
diff --git a/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md b/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md
index 4d8777d204bb..f6b515e67f15 100644
--- a/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md
+++ b/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md
@@ -7,19 +7,21 @@ A storage server for Firefox Sync that you can easily host yourself.
 The absolute minimal configuration for the sync server looks like this:
 
 ```nix
-services.mysql.package = pkgs.mariadb;
-
-services.firefox-syncserver = {
-  enable = true;
-  secrets = builtins.toFile "sync-secrets" ''
-    SYNC_MASTER_SECRET=this-secret-is-actually-leaked-to-/nix/store
-  '';
-  singleNode = {
+{
+  services.mysql.package = pkgs.mariadb;
+
+  services.firefox-syncserver = {
     enable = true;
-    hostname = "localhost";
-    url = "http://localhost:5000";
+    secrets = builtins.toFile "sync-secrets" ''
+      SYNC_MASTER_SECRET=this-secret-is-actually-leaked-to-/nix/store
+    '';
+    singleNode = {
+      enable = true;
+      hostname = "localhost";
+      url = "http://localhost:5000";
+    };
   };
-};
+}
 ```
 
 This will start a sync server that is only accessible locally. Once the service is
diff --git a/nixpkgs/nixos/modules/services/networking/microsocks.nix b/nixpkgs/nixos/modules/services/networking/microsocks.nix
new file mode 100644
index 000000000000..be79a8495636
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/microsocks.nix
@@ -0,0 +1,146 @@
+{ config,
+  lib,
+  pkgs,
+  ...
+}:
+
+let
+  cfg = config.services.microsocks;
+
+  cmd =
+    if cfg.execWrapper != null
+    then "${cfg.execWrapper} ${cfg.package}/bin/microsocks"
+    else "${cfg.package}/bin/microsocks";
+  args =
+    [ "-i" cfg.ip "-p" (toString cfg.port) ]
+    ++ lib.optionals (cfg.authOnce) [ "-1" ]
+    ++ lib.optionals (cfg.disableLogging) [ "-q" ]
+    ++ lib.optionals (cfg.outgoingBindIp != null) [ "-b" cfg.outgoingBindIp ]
+    ++ lib.optionals (cfg.authUsername != null) [ "-u" cfg.authUsername ];
+in {
+  options.services.microsocks = {
+    enable = lib.mkEnableOption (lib.mdDoc "Tiny, portable SOCKS5 server with very moderate resource usage");
+    user = lib.mkOption {
+      default = "microsocks";
+      description = lib.mdDoc "User microsocks runs as.";
+      type = lib.types.str;
+    };
+    group = lib.mkOption {
+      default = "microsocks";
+      description = lib.mdDoc "Group microsocks runs as.";
+      type = lib.types.str;
+    };
+    package = lib.mkPackageOption pkgs "microsocks" {};
+    ip = lib.mkOption {
+      type = lib.types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        IP on which microsocks should listen. Defaults to 127.0.0.1 for
+        security reasons.
+      '';
+    };
+    port = lib.mkOption {
+      type = lib.types.port;
+      default = 1080;
+      description = lib.mdDoc "Port on which microsocks should listen.";
+    };
+    disableLogging = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc "If true, microsocks will not log any messages to stdout/stderr.";
+    };
+    authOnce = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If true, once a specific IP address has authenticated successfully with
+        user/pass, it is added to a whitelist and may use the proxy without auth.
+      '';
+    };
+    outgoingBindIp = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = lib.mdDoc "Specifies which IP outgoing connections are bound to.";
+    };
+    authUsername = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      example = "alice";
+      description = lib.mdDoc "Optional username to use for authentication.";
+    };
+    authPasswordFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      example = "/run/secrets/microsocks-password";
+      description = lib.mdDoc "Path to a file containing the password for authentication.";
+    };
+    execWrapper = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      example = ''
+        ''${pkgs.mullvad-vpn}/bin/mullvad-exclude
+      '';
+      description = lib.mdDoc ''
+        An optional command to prepend to the microsocks command (such as proxychains, or a VPN exclude command).
+      '';
+    };
+  };
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.authUsername != null) == (cfg.authPasswordFile != null);
+        message = "Need to set both authUsername and authPasswordFile for microsocks";
+      }
+    ];
+    users = {
+      users = lib.mkIf (cfg.user == "microsocks") {
+        microsocks = {
+          group = cfg.group;
+          isSystemUser = true;
+        };
+      };
+      groups = lib.mkIf (cfg.group == "microsocks") {
+        microsocks = {};
+      };
+    };
+    systemd.services.microsocks = {
+      enable = true;
+      description = "Tiny SOCKS5 server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Restart = "on-failure";
+        RestartSec = 10;
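+        # Expose the password file to the service via systemd credentials; it
+        # is read back from $CREDENTIALS_DIRECTORY in the start script below.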
+        LoadCredential = lib.optionalString (cfg.authPasswordFile != null) "MICROSOCKS_PASSWORD_FILE:${cfg.authPasswordFile}";
+        MemoryDenyWriteExecute = true;
+        SystemCallArchitectures = "native";
+        PrivateTmp = true;
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        PrivateDevices = true;
+        RestrictSUIDSGID = true;
+        RestrictNamespaces = [
+          "cgroup"
+          "ipc"
+          "pid"
+          "user"
+          "uts"
+        ];
+      };
+      script =
+        if cfg.authPasswordFile != null
+        then ''
+          PASSWORD=$(cat "$CREDENTIALS_DIRECTORY/MICROSOCKS_PASSWORD_FILE")
+          ${cmd} ${lib.escapeShellArgs args} -P "$PASSWORD"
+        ''
+        else ''
+          ${cmd} ${lib.escapeShellArgs args}
+        '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/mosquitto.md b/nixpkgs/nixos/modules/services/networking/mosquitto.md
index 5cdb598151e5..66b3ad6cfa8f 100644
--- a/nixpkgs/nixos/modules/services/networking/mosquitto.md
+++ b/nixpkgs/nixos/modules/services/networking/mosquitto.md
@@ -7,14 +7,16 @@ Mosquitto is a MQTT broker often used for IoT or home automation data transport.
 A minimal configuration for Mosquitto is
 
 ```nix
-services.mosquitto = {
-  enable = true;
-  listeners = [ {
-    acl = [ "pattern readwrite #" ];
-    omitPasswordAuth = true;
-    settings.allow_anonymous = true;
-  } ];
-};
+{
+  services.mosquitto = {
+    enable = true;
+    listeners = [ {
+      acl = [ "pattern readwrite #" ];
+      omitPasswordAuth = true;
+      settings.allow_anonymous = true;
+    } ];
+  };
+}
 ```
 
 This will start a broker on port 1883, listening on all interfaces of the machine, allowing
@@ -25,37 +27,42 @@ full read access to a user `monitor` and restricted write access to a user `serv
 like
 
 ```nix
-services.mosquitto = {
-  enable = true;
-  listeners = [ {
-    users = {
-      monitor = {
-        acl = [ "read #" ];
-        password = "monitor";
+{
+  services.mosquitto = {
+    enable = true;
+    listeners = [ {
+      users = {
+        monitor = {
+          acl = [ "read #" ];
+          password = "monitor";
+        };
+        service = {
+          acl = [ "write service/#" ];
+          password = "service";
+        };
       };
-      service = {
-        acl = [ "write service/#" ];
-        password = "service";
-      };
-    };
-  } ];
-};
+    } ];
+  };
+}
 ```
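+
+Note that passwords given literally in the configuration, as above, end up in the
+world-readable Nix store. A sketch of a file-based alternative, assuming the
+listener users accept a `passwordFile` option:
+
+```nix
+{
+  services.mosquitto.listeners = [ {
+    users.monitor = {
+      acl = [ "read #" ];
+      passwordFile = "/run/secrets/mosquitto-monitor";
+    };
+  } ];
+}
+```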
 
 TLS authentication is configured by setting TLS-related options of the listener:
 
 ```nix
-services.mosquitto = {
-  enable = true;
-  listeners = [ {
-    port = 8883; # port change is not required, but helpful to avoid mistakes
-    # ...
-    settings = {
-      cafile = "/path/to/mqtt.ca.pem";
-      certfile = "/path/to/mqtt.pem";
-      keyfile = "/path/to/mqtt.key";
-    };
-  } ];
+{
+  services.mosquitto = {
+    enable = true;
+    listeners = [ {
+      port = 8883; # port change is not required, but helpful to avoid mistakes
+      # ...
+      settings = {
+        cafile = "/path/to/mqtt.ca.pem";
+        certfile = "/path/to/mqtt.pem";
+        keyfile = "/path/to/mqtt.key";
+      };
+    } ];
+  };
+}
 ```
 
 ## Configuration {#module-services-mosquitto-config}
diff --git a/nixpkgs/nixos/modules/services/networking/netbird.md b/nixpkgs/nixos/modules/services/networking/netbird.md
index a326207becc8..e1f6753cbd30 100644
--- a/nixpkgs/nixos/modules/services/networking/netbird.md
+++ b/nixpkgs/nixos/modules/services/networking/netbird.md
@@ -5,7 +5,9 @@
 The absolute minimal configuration for the netbird daemon looks like this:
 
 ```nix
-services.netbird.enable = true;
+{
+  services.netbird.enable = true;
+}
 ```
 
 This will set up a netbird service listening on the port `51820` associated to the
@@ -14,7 +16,9 @@ This will set up a netbird service listening on the port `51820` associated to t
 It is strictly equivalent to setting:
 
 ```nix
-services.netbird.tunnels.wt0.stateDir = "netbird";
+{
+  services.netbird.tunnels.wt0.stateDir = "netbird";
+}
 ```
 
 The `enable` option is mainly kept for backward compatibility, as defining netbird
@@ -29,11 +33,13 @@ The following configuration will start a netbird daemon using the interface `wt1
 the port 51830. Its configuration file will then be located at `/var/lib/netbird-wt1/config.json`.
 
 ```nix
-services.netbird.tunnels = {
-  wt1 = {
-    port = 51830;
+{
+  services.netbird.tunnels = {
+    wt1 = {
+      port = 51830;
+    };
   };
-};
+}
 ```
 
 To interact with it, you will need to specify the correct daemon address:
@@ -48,9 +54,11 @@ It is also possible to overwrite default options passed to the service, for
 example:
 
 ```nix
-services.netbird.tunnels.wt1.environment = {
-  NB_DAEMON_ADDR = "unix:///var/run/toto.sock"
-};
+{
+  services.netbird.tunnels.wt1.environment = {
+    NB_DAEMON_ADDR = "unix:///var/run/toto.sock";
+  };
+}
 ```
 
 This will set the socket to interact with the netbird service to `/var/run/toto.sock`.
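+
+For example, a client invocation would then pass the matching address (a sketch;
+the flag spelling follows the upstream `netbird` CLI):
+
+```ShellSession
+$ netbird status --daemon-addr unix:///var/run/toto.sock
+```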
diff --git a/nixpkgs/nixos/modules/services/networking/pleroma.md b/nixpkgs/nixos/modules/services/networking/pleroma.md
index 7c499e1c616c..c2313fd63e6a 100644
--- a/nixpkgs/nixos/modules/services/networking/pleroma.md
+++ b/nixpkgs/nixos/modules/services/networking/pleroma.md
@@ -17,11 +17,13 @@ The `config.exs` file can be further customized following the instructions on th
 ## Initializing the database {#module-services-pleroma-initialize-db}
 
 First, the PostgreSQL service must be enabled in the NixOS configuration
-```
-services.postgresql = {
-  enable = true;
-  package = pkgs.postgresql_13;
-};
+```nix
+{
+  services.postgresql = {
+    enable = true;
+    package = pkgs.postgresql_13;
+  };
+}
 ```
 and activated with the usual
 ```ShellSession
@@ -38,43 +40,45 @@ $ sudo -u postgres psql -f setup.psql
 In this section we will enable the Pleroma service only locally, so its configurations can be improved incrementally.
 
 This is an example configuration, where the [](#opt-services.pleroma.configs) option contains the content of the file `config.exs`, generated [in the first section](#module-services-pleroma-generate-config), but with the secrets (database password, endpoint secret key, salts, etc.) removed. Removing secrets is important, because otherwise they will be stored publicly in the Nix store.
-```
-services.pleroma = {
-  enable = true;
-  secretConfigFile = "/var/lib/pleroma/secrets.exs";
-  configs = [
-    ''
-    import Config
-
-    config :pleroma, Pleroma.Web.Endpoint,
-      url: [host: "pleroma.example.net", scheme: "https", port: 443],
-      http: [ip: {127, 0, 0, 1}, port: 4000]
-
-    config :pleroma, :instance,
-      name: "Test",
-      email: "admin@example.net",
-      notify_email: "admin@example.net",
-      limit: 5000,
-      registrations_open: true
-
-    config :pleroma, :media_proxy,
-      enabled: false,
-      redirect_on_failure: true
-
-    config :pleroma, Pleroma.Repo,
-      adapter: Ecto.Adapters.Postgres,
-      username: "pleroma",
-      database: "pleroma",
-      hostname: "localhost"
-
-    # Configure web push notifications
-    config :web_push_encryption, :vapid_details,
-      subject: "mailto:admin@example.net"
-
-    # ... TO CONTINUE ...
-    ''
-  ];
-};
+```nix
+{
+  services.pleroma = {
+    enable = true;
+    secretConfigFile = "/var/lib/pleroma/secrets.exs";
+    configs = [
+      ''
+      import Config
+
+      config :pleroma, Pleroma.Web.Endpoint,
+        url: [host: "pleroma.example.net", scheme: "https", port: 443],
+        http: [ip: {127, 0, 0, 1}, port: 4000]
+
+      config :pleroma, :instance,
+        name: "Test",
+        email: "admin@example.net",
+        notify_email: "admin@example.net",
+        limit: 5000,
+        registrations_open: true
+
+      config :pleroma, :media_proxy,
+        enabled: false,
+        redirect_on_failure: true
+
+      config :pleroma, Pleroma.Repo,
+        adapter: Ecto.Adapters.Postgres,
+        username: "pleroma",
+        database: "pleroma",
+        hostname: "localhost"
+
+      # Configure web push notifications
+      config :web_push_encryption, :vapid_details,
+        subject: "mailto:admin@example.net"
+
+      # ... TO CONTINUE ...
+      ''
+    ];
+  };
+}
 ```
 
 Secrets must be moved into a file pointed to by [](#opt-services.pleroma.secretConfigFile), in our case `/var/lib/pleroma/secrets.exs`. This file can be created by copying the previously generated `config.exs` file and then removing all the settings except the secrets. This is an example
@@ -121,60 +125,62 @@ $ pleroma_ctl user new <nickname> <email>  --admin --moderator --password <passw
 
 In this configuration, Pleroma is listening only on the local port 4000. Nginx can be configured as a reverse proxy, forwarding requests from public ports to the Pleroma service. This is an example configuration, using
 [Let's Encrypt](https://letsencrypt.org/) for the TLS certificates
-```
-security.acme = {
-  email = "root@example.net";
-  acceptTerms = true;
-};
-
-services.nginx = {
-  enable = true;
-  addSSL = true;
-
-  recommendedTlsSettings = true;
-  recommendedOptimisation = true;
-  recommendedGzipSettings = true;
-
-  recommendedProxySettings = false;
-  # NOTE: if enabled, the NixOS proxy optimizations will override the Pleroma
-  # specific settings, and they will enter in conflict.
-
-  virtualHosts = {
-    "pleroma.example.net" = {
-      http2 = true;
-      enableACME = true;
-      forceSSL = true;
-
-      locations."/" = {
-        proxyPass = "http://127.0.0.1:4000";
-
-        extraConfig = ''
-          etag on;
-          gzip on;
-
-          add_header 'Access-Control-Allow-Origin' '*' always;
-          add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always;
-          add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always;
-          add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always;
-          if ($request_method = OPTIONS) {
-            return 204;
-          }
-          add_header X-XSS-Protection "1; mode=block";
-          add_header X-Permitted-Cross-Domain-Policies none;
-          add_header X-Frame-Options DENY;
-          add_header X-Content-Type-Options nosniff;
-          add_header Referrer-Policy same-origin;
-          add_header X-Download-Options noopen;
-          proxy_http_version 1.1;
-          proxy_set_header Upgrade $http_upgrade;
-          proxy_set_header Connection "upgrade";
-          proxy_set_header Host $host;
-
-          client_max_body_size 16m;
-          # NOTE: increase if users need to upload very big files
-        '';
+```nix
+{
+  security.acme = {
+    email = "root@example.net";
+    acceptTerms = true;
+  };
+
+  services.nginx = {
+    enable = true;
+    addSSL = true;
+
+    recommendedTlsSettings = true;
+    recommendedOptimisation = true;
+    recommendedGzipSettings = true;
+
+    recommendedProxySettings = false;
+    # NOTE: if enabled, the NixOS proxy optimizations will override the
+    # Pleroma-specific settings and conflict with them.
+
+    virtualHosts = {
+      "pleroma.example.net" = {
+        http2 = true;
+        enableACME = true;
+        forceSSL = true;
+
+        locations."/" = {
+          proxyPass = "http://127.0.0.1:4000";
+
+          extraConfig = ''
+            etag on;
+            gzip on;
+
+            add_header 'Access-Control-Allow-Origin' '*' always;
+            add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always;
+            add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always;
+            add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always;
+            if ($request_method = OPTIONS) {
+              return 204;
+            }
+            add_header X-XSS-Protection "1; mode=block";
+            add_header X-Permitted-Cross-Domain-Policies none;
+            add_header X-Frame-Options DENY;
+            add_header X-Content-Type-Options nosniff;
+            add_header Referrer-Policy same-origin;
+            add_header X-Download-Options noopen;
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "upgrade";
+            proxy_set_header Host $host;
+
+            client_max_body_size 16m;
+            # NOTE: increase if users need to upload very big files
+          '';
+        };
       };
     };
   };
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/networking/prosody.md b/nixpkgs/nixos/modules/services/networking/prosody.md
index 2da2c242a98b..d6eee4e29f0a 100644
--- a/nixpkgs/nixos/modules/services/networking/prosody.md
+++ b/nixpkgs/nixos/modules/services/networking/prosody.md
@@ -25,25 +25,27 @@ A good configuration to start with, including a
 [Multi User Chat (MUC)](https://xmpp.org/extensions/xep-0045.html)
 endpoint as well as an [HTTP File Upload](https://xmpp.org/extensions/xep-0363.html)
 endpoint will look like this:
-```
-services.prosody = {
-  enable = true;
-  admins = [ "root@example.org" ];
-  ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
-  ssl.key = "/var/lib/acme/example.org/key.pem";
-  virtualHosts."example.org" = {
-      enabled = true;
-      domain = "example.org";
-      ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
-      ssl.key = "/var/lib/acme/example.org/key.pem";
-  };
-  muc = [ {
-      domain = "conference.example.org";
-  } ];
-  uploadHttp = {
-      domain = "upload.example.org";
+```nix
+{
+  services.prosody = {
+    enable = true;
+    admins = [ "root@example.org" ];
+    ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
+    ssl.key = "/var/lib/acme/example.org/key.pem";
+    virtualHosts."example.org" = {
+        enabled = true;
+        domain = "example.org";
+        ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
+        ssl.key = "/var/lib/acme/example.org/key.pem";
+    };
+    muc = [ {
+        domain = "conference.example.org";
+    } ];
+    uploadHttp = {
+        domain = "upload.example.org";
+    };
   };
-};
+}
 ```
 
 ## Let's Encrypt Configuration {#module-services-prosody-letsencrypt}
@@ -57,16 +59,18 @@ certificate by leveraging the ACME
 
 Provided the setup detailed in the previous section, you'll need the following ACME configuration to generate
 a TLS certificate for the three endpoints:
-```
-security.acme = {
-  email = "root@example.org";
-  acceptTerms = true;
-  certs = {
-    "example.org" = {
-      webroot = "/var/www/example.org";
-      email = "root@example.org";
-      extraDomainNames = [ "conference.example.org" "upload.example.org" ];
+```nix
+{
+  security.acme = {
+    email = "root@example.org";
+    acceptTerms = true;
+    certs = {
+      "example.org" = {
+        webroot = "/var/www/example.org";
+        email = "root@example.org";
+        extraDomainNames = [ "conference.example.org" "upload.example.org" ];
+      };
     };
   };
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-control.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-control.nix
new file mode 100644
index 000000000000..fdf3a9ba3cc1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-control.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-control;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "cs";
+      config_dir = "/etc/scion";
+      reconnect_to_dispatcher = true;
+    };
+    beacon_db = {
+      connection = "/var/lib/scion-control/control.beacon.db";
+    };
+    path_db = {
+      connection = "/var/lib/scion-control/control.path.db";
+    };
+    trust_db = {
+      connection = "/var/lib/scion-control/control.trust.db";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-control.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-control = {
+    enable = mkEnableOption (lib.mdDoc "the scion-control service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          path_db = {
+            connection = "/var/lib/scion-control/control.path.db";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-control configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-control = {
+      description = "SCION Control Service";
+      after = [ "network-online.target" "scion-dispatcher.service" ];
+      wants = [ "network-online.target" "scion-dispatcher.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        Group = if config.services.scion.scion-dispatcher.enable then "scion" else null;
+        ExecStart = "${pkgs.scion}/bin/scion-control --config ${configFile}";
+        DynamicUser = true;
+        Restart = "on-failure";
+        BindPaths = [ "/dev/shm:/run/shm" ];
+        StateDirectory = "scion-control";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix
new file mode 100644
index 000000000000..0bcc18771fc3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-daemon.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-daemon;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "sd";
+      config_dir = "/etc/scion";
+      reconnect_to_dispatcher = true;
+    };
+    path_db = {
+      connection = "/var/lib/scion-daemon/sd.path.db";
+    };
+    trust_db = {
+      connection = "/var/lib/scion-daemon/sd.trust.db";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-daemon.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-daemon = {
+    enable = mkEnableOption (lib.mdDoc "the scion-daemon service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          path_db = {
+            connection = "/var/lib/scion-daemon/sd.path.db";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-daemon configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-daemon = {
+      description = "SCION Daemon";
+      after = [ "network-online.target" "scion-dispatcher.service" ];
+      wants = [ "network-online.target" "scion-dispatcher.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.scion}/bin/scion-daemon --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        StateDirectory = "scion-daemon";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix
new file mode 100644
index 000000000000..bab1ec0a989b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-dispatcher.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-dispatcher;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    dispatcher = {
+      id = "dispatcher";
+      socket_file_mode = "0770";
+      application_socket = "/dev/shm/dispatcher/default.sock";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-dispatcher.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-dispatcher = {
+    enable = mkEnableOption (lib.mdDoc "the scion-dispatcher service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          dispatcher = {
+            id = "dispatcher";
+            socket_file_mode = "0770";
+            application_socket = "/dev/shm/dispatcher/default.sock";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-dispatcher configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    # Needed for group ownership of the dispatcher socket
+    users.groups.scion = {};
+
+    # SCION programs hardcode the dispatcher socket path in /run/shm, and it is
+    # not configurable at runtime. Upstream plans to obsolete the dispatcher in
+    # favor of an SCMP daemon, at which point this can be removed.
+    system.activationScripts.scion-dispatcher = ''
+      ln -sf /dev/shm /run/shm
+    '';
+
+    systemd.services.scion-dispatcher = {
+      description = "SCION Dispatcher";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        Group = "scion";
+        DynamicUser = true;
+        BindPaths = [ "/dev/shm:/run/shm" ];
+        ExecStartPre = "${pkgs.coreutils}/bin/rm -rf /run/shm/dispatcher";
+        ExecStart = "${pkgs.scion}/bin/scion-dispatcher --config ${configFile}";
+        Restart = "on-failure";
+        StateDirectory = "scion-dispatcher";
+      };
+    };
+  };
+}
+
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion-router.nix b/nixpkgs/nixos/modules/services/networking/scion/scion-router.nix
new file mode 100644
index 000000000000..cbe83c6dbf8d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion-router.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-router;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "br";
+      config_dir = "/etc/scion";
+    };
+  };
+  configFile = toml.generate "scion-router.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-router = {
+    enable = mkEnableOption (lib.mdDoc "the scion-router service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          general.id = "br";
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-router configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-router = {
+      description = "SCION Router";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.scion}/bin/scion-router --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        StateDirectory = "scion-router";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/scion/scion.nix b/nixpkgs/nixos/modules/services/networking/scion/scion.nix
new file mode 100644
index 000000000000..704f942b5d9e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/scion/scion.nix
@@ -0,0 +1,39 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion;
+in
+{
+  options.services.scion = {
+    enable = mkEnableOption (lib.mdDoc "all of the scion components and services");
+    bypassBootstrapWarning = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        bypass Nix warning about SCION PKI bootstrapping
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    services.scion = {
+      scion-dispatcher.enable = true;
+      scion-daemon.enable = true;
+      scion-router.enable = true;
+      scion-control.enable = true;
+    };
+    assertions = [
+      { assertion = cfg.bypassBootstrapWarning == true;
+        message = ''
+          SCION is a routing protocol and requires bootstrapping with a manual, imperative key signing ceremony. You may want to join an existing Isolation Domain (ISD) such as scionlab.org, or bootstrap your own. If you have completed and configured the public key infrastructure for SCION and are sure this process is complete, then add the following to your configuration:
+
+          services.scion.bypassBootstrapWarning = true;
+
+          Refer to docs.scion.org for more information.
+        '';
+      }
+    ];
+  };
+}
+
diff --git a/nixpkgs/nixos/modules/services/networking/yggdrasil.md b/nixpkgs/nixos/modules/services/networking/yggdrasil.md
index bbaea5bc74aa..7b899f9d6ddb 100644
--- a/nixpkgs/nixos/modules/services/networking/yggdrasil.md
+++ b/nixpkgs/nixos/modules/services/networking/yggdrasil.md
@@ -12,7 +12,7 @@ self-arranging IPv6 network.
 ### Simple ephemeral node {#module-services-networking-yggdrasil-configuration-simple}
 
 An annotated example of a simple configuration:
-```
+```nix
 {
   services.yggdrasil = {
     enable = true;
@@ -39,7 +39,7 @@ An annotated example of a simple configuration:
 ### Persistent node with prefix {#module-services-networking-yggdrasil-configuration-prefix}
 
 A node with a fixed address that announces a prefix:
-```
+```nix
 let
   address = "210:5217:69c0:9afc:1b95:b9f:8718:c3d2";
   prefix = "310:5217:69c0:9afc";
@@ -90,7 +90,7 @@ in {
 
 A NixOS container attached to the Yggdrasil network via a node running on the
 host:
-```
+```nix
 let
   yggPrefix64 = "310:5217:69c0:9afc";
     # Again, taken from the output of "yggdrasilctl getself".
diff --git a/nixpkgs/nixos/modules/services/search/meilisearch.md b/nixpkgs/nixos/modules/services/search/meilisearch.md
index 299f56bf8293..b9f65861b1d1 100644
--- a/nixpkgs/nixos/modules/services/search/meilisearch.md
+++ b/nixpkgs/nixos/modules/services/search/meilisearch.md
@@ -7,7 +7,9 @@ Meilisearch is a lightweight, fast and powerful search engine. Think elastic sea
 The minimum to start Meilisearch is
 
 ```nix
-services.meilisearch.enable = true;
+{
+  services.meilisearch.enable = true;
+}
 ```
 
 This will start the HTTP server included with Meilisearch on port 7700.
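+
+If the defaults do not fit, the listen address and port can be adjusted; a minimal
+sketch, assuming the module's `listenAddress` and `listenPort` options:
+
+```nix
+{
+  services.meilisearch = {
+    enable = true;
+    listenAddress = "127.0.0.1";
+    listenPort = 7701;
+  };
+}
+```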
diff --git a/nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix b/nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix
index b8e45f67cf78..dd3ded6259c4 100644
--- a/nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix
+++ b/nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix
@@ -13,6 +13,17 @@ in
         The address of the reverse proxy endpoint for oauth2_proxy
       '';
     };
+
+    domain = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        The domain under which oauth2_proxy will be accessible and to which
+        cookies are scoped. This setting must be set to ensure that back-redirects
+        work properly if oauth2-proxy is configured with {option}`services.oauth2_proxy.cookie.domain`
+        or with multiple {option}`services.oauth2_proxy.nginx.virtualHosts` that are not on the same domain.
+      '';
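+      # Hypothetical example value; any domain that covers the oauth2_proxy
+      # endpoint and all protected virtual hosts works here.
+      example = "auth.example.com";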
+    };
+
     virtualHosts = mkOption {
       type = types.listOf types.str;
       default = [];
@@ -21,22 +32,26 @@ in
       '';
     };
   };
+
   config.services.oauth2_proxy = mkIf (cfg.virtualHosts != [] && (hasPrefix "127.0.0.1:" cfg.proxy)) {
     enable = true;
   };
-  config.services.nginx = mkIf config.services.oauth2_proxy.enable (mkMerge
-  ((optional (cfg.virtualHosts != []) {
-    recommendedProxySettings = true; # needed because duplicate headers
-  }) ++ (map (vhost: {
-    virtualHosts.${vhost} = {
-      locations."/oauth2/" = {
+
+  config.services.nginx = mkIf (cfg.virtualHosts != [] && config.services.oauth2_proxy.enable) (mkMerge ([
+    {
+      virtualHosts.${cfg.domain}.locations."/oauth2/" = {
         proxyPass = cfg.proxy;
         extraConfig = ''
           proxy_set_header X-Scheme                $scheme;
           proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
         '';
       };
-      locations."/oauth2/auth" = {
+    }
+  ] ++ optional (cfg.virtualHosts != []) {
+    recommendedProxySettings = true; # needed because of duplicate headers
+  } ++ (map (vhost: {
+    virtualHosts.${vhost}.locations = {
+      "/oauth2/auth" = {
         proxyPass = cfg.proxy;
         extraConfig = ''
           proxy_set_header X-Scheme         $scheme;
@@ -45,9 +60,10 @@ in
           proxy_pass_request_body           off;
         '';
       };
-      locations."/".extraConfig = ''
+      "@redirectToAuth2ProxyLogin".return = "307 https://${cfg.domain}/oauth2/start?rd=$scheme://$host$request_uri";
+      "/".extraConfig = ''
         auth_request /oauth2/auth;
-        error_page 401 = /oauth2/sign_in;
+        error_page 401 = @redirectToAuth2ProxyLogin;
 
         # pass information via X-User and X-Email headers to backend,
         # requires running with --set-xauthrequest flag
@@ -60,7 +76,6 @@ in
         auth_request_set $auth_cookie $upstream_http_set_cookie;
         add_header Set-Cookie $auth_cookie;
       '';
-
     };
   }) cfg.virtualHosts)));
 }
diff --git a/nixpkgs/nixos/modules/services/security/usbguard.nix b/nixpkgs/nixos/modules/services/security/usbguard.nix
index f167fbb2eca8..ff54176e13d3 100644
--- a/nixpkgs/nixos/modules/services/security/usbguard.nix
+++ b/nixpkgs/nixos/modules/services/security/usbguard.nix
@@ -80,7 +80,7 @@ in
       };
 
       implicitPolicyTarget = mkOption {
-        type = policy;
+        type = types.enum [ "allow" "block" "reject" ];
         default = "block";
         description = lib.mdDoc ''
           How to treat USB devices that don't match any rule in the policy.
@@ -110,7 +110,7 @@ in
       };
 
       insertedDevicePolicy = mkOption {
-        type = policy;
+        type = types.enum [ "block" "reject" "apply-policy" ];
         default = "apply-policy";
         description = lib.mdDoc ''
           How to treat USB devices that are already connected after the daemon
diff --git a/nixpkgs/nixos/modules/services/video/photonvision.nix b/nixpkgs/nixos/modules/services/video/photonvision.nix
new file mode 100644
index 000000000000..fdbe9da3999d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/video/photonvision.nix
@@ -0,0 +1,64 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.photonvision;
+in
+{
+  options = {
+    services.photonvision = {
+      enable = lib.mkEnableOption (lib.mdDoc "PhotonVision");
+
+      package = lib.mkPackageOption pkgs "photonvision" {};
+
+      openFirewall = lib.mkOption {
+        description = lib.mdDoc ''
+          Whether to open the required ports in the firewall.
+        '';
+        default = false;
+        type = lib.types.bool;
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.photonvision = {
+      description = "PhotonVision, the free, fast, and easy-to-use computer vision solution for the FIRST Robotics Competition";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        ExecStart = lib.getExe cfg.package;
+
+        # ephemeral root directory
+        RuntimeDirectory = "photonvision";
+        RootDirectory = "/run/photonvision";
+
+        # setup persistent state and logs directories
+        StateDirectory = "photonvision";
+        LogsDirectory = "photonvision";
+
+        BindReadOnlyPaths = [
+          # mount the nix store read-only
+          "/nix/store"
+
+          # the JRE reads the user.home property from /etc/passwd
+          "/etc/passwd"
+        ];
+        BindPaths = [
+          # mount the configuration and logs directories to the host
+          "/var/lib/photonvision:/photonvision_config"
+          "/var/log/photonvision:/photonvision_config/logs"
+        ];
+
+        # for PhotonVision's dynamic libraries, which it writes to /tmp
+        PrivateTmp = true;
+      };
+    };
+
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 5800 ];
+      allowedTCPPortRanges = [{ from = 1180; to = 1190; }];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/akkoma.md b/nixpkgs/nixos/modules/services/web-apps/akkoma.md
index 83dd1a8b35f2..13b074b228a4 100644
--- a/nixpkgs/nixos/modules/services/web-apps/akkoma.md
+++ b/nixpkgs/nixos/modules/services/web-apps/akkoma.md
@@ -19,21 +19,23 @@ be run behind a HTTP proxy on `fediverse.example.com`.
 
 
 ```nix
-services.akkoma.enable = true;
-services.akkoma.config = {
-  ":pleroma" = {
-    ":instance" = {
-      name = "My Akkoma instance";
-      description = "More detailed description";
-      email = "admin@example.com";
-      registration_open = false;
-    };
-
-    "Pleroma.Web.Endpoint" = {
-      url.host = "fediverse.example.com";
+{
+  services.akkoma.enable = true;
+  services.akkoma.config = {
+    ":pleroma" = {
+      ":instance" = {
+        name = "My Akkoma instance";
+        description = "More detailed description";
+        email = "admin@example.com";
+        registration_open = false;
+      };
+
+      "Pleroma.Web.Endpoint" = {
+        url.host = "fediverse.example.com";
+      };
     };
   };
-};
+}
 ```
 
 Please refer to the [configuration cheat sheet](https://docs.akkoma.dev/stable/configuration/cheatsheet/)
@@ -55,19 +57,21 @@ Although it is possible to expose Akkoma directly, it is common practice to oper
 HTTP reverse proxy such as nginx.
 
 ```nix
-services.akkoma.nginx = {
-  enableACME = true;
-  forceSSL = true;
-};
-
-services.nginx = {
-  enable = true;
-
-  clientMaxBodySize = "16m";
-  recommendedTlsSettings = true;
-  recommendedOptimisation = true;
-  recommendedGzipSettings = true;
-};
+{
+  services.akkoma.nginx = {
+    enableACME = true;
+    forceSSL = true;
+  };
+
+  services.nginx = {
+    enable = true;
+
+    clientMaxBodySize = "16m";
+    recommendedTlsSettings = true;
+    recommendedOptimisation = true;
+    recommendedGzipSettings = true;
+  };
+}
 ```
 
 Please refer to [](#module-security-acme) for details on how to provision an SSL/TLS certificate.
@@ -78,51 +82,53 @@ Without the media proxy function, Akkoma does not store any remote media like pi
 locally, and clients have to fetch them directly from the source server.
 
 ```nix
-# Enable nginx slice module distributed with Tengine
-services.nginx.package = pkgs.tengine;
-
-# Enable media proxy
-services.akkoma.config.":pleroma".":media_proxy" = {
-  enabled = true;
-  proxy_opts.redirect_on_failure = true;
-};
-
-# Adjust the persistent cache size as needed:
-#  Assuming an average object size of 128 KiB, around 1 MiB
-#  of memory is required for the key zone per GiB of cache.
-# Ensure that the cache directory exists and is writable by nginx.
-services.nginx.commonHttpConfig = ''
-  proxy_cache_path /var/cache/nginx/cache/akkoma-media-cache
-    levels= keys_zone=akkoma_media_cache:16m max_size=16g
-    inactive=1y use_temp_path=off;
-'';
-
-services.akkoma.nginx = {
-  locations."/proxy" = {
-    proxyPass = "http://unix:/run/akkoma/socket";
-
-    extraConfig = ''
-      proxy_cache akkoma_media_cache;
-
-      # Cache objects in slices of 1 MiB
-      slice 1m;
-      proxy_cache_key $host$uri$is_args$args$slice_range;
-      proxy_set_header Range $slice_range;
-
-      # Decouple proxy and upstream responses
-      proxy_buffering on;
-      proxy_cache_lock on;
-      proxy_ignore_client_abort on;
-
-      # Default cache times for various responses
-      proxy_cache_valid 200 1y;
-      proxy_cache_valid 206 301 304 1h;
-
-      # Allow serving of stale items
-      proxy_cache_use_stale error timeout invalid_header updating;
-    '';
+{
+  # Enable nginx slice module distributed with Tengine
+  services.nginx.package = pkgs.tengine;
+
+  # Enable media proxy
+  services.akkoma.config.":pleroma".":media_proxy" = {
+    enabled = true;
+    proxy_opts.redirect_on_failure = true;
   };
-};
+
+  # Adjust the persistent cache size as needed:
+  #  Assuming an average object size of 128 KiB, around 1 MiB
+  #  of memory is required for the key zone per GiB of cache.
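+  #  (For example, the 16m key zone below pairs with the 16g max_size.)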
+  # Ensure that the cache directory exists and is writable by nginx.
+  services.nginx.commonHttpConfig = ''
+    proxy_cache_path /var/cache/nginx/cache/akkoma-media-cache
+      levels= keys_zone=akkoma_media_cache:16m max_size=16g
+      inactive=1y use_temp_path=off;
+  '';
+
+  services.akkoma.nginx = {
+    locations."/proxy" = {
+      proxyPass = "http://unix:/run/akkoma/socket";
+
+      extraConfig = ''
+        proxy_cache akkoma_media_cache;
+
+        # Cache objects in slices of 1 MiB
+        slice 1m;
+        proxy_cache_key $host$uri$is_args$args$slice_range;
+        proxy_set_header Range $slice_range;
+
+        # Decouple proxy and upstream responses
+        proxy_buffering on;
+        proxy_cache_lock on;
+        proxy_ignore_client_abort on;
+
+        # Default cache times for various responses
+        proxy_cache_valid 200 1y;
+        proxy_cache_valid 206 301 304 1h;
+
+        # Allow serving of stale items
+        proxy_cache_use_stale error timeout invalid_header updating;
+      '';
+    };
+  };
+}
 ```
 
 #### Prefetch remote media {#modules-services-akkoma-prefetch-remote-media}
@@ -132,10 +138,12 @@ fetches all media associated with a post through the media proxy, as soon as the
 received by the instance.
 
 ```nix
-services.akkoma.config.":pleroma".":mrf".policies =
-  map (pkgs.formats.elixirConf { }).lib.mkRaw [
-    "Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy"
-];
+{
+  services.akkoma.config.":pleroma".":mrf".policies =
+    map (pkgs.formats.elixirConf { }).lib.mkRaw [
+      "Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy"
+  ];
+}
 ```
 
 #### Media previews {#modules-services-akkoma-media-previews}
@@ -143,11 +151,13 @@ services.akkoma.config.":pleroma".":mrf".policies =
 Akkoma can generate previews for media.
 
 ```nix
-services.akkoma.config.":pleroma".":media_preview_proxy" = {
-  enabled = true;
-  thumbnail_max_width = 1920;
-  thumbnail_max_height = 1080;
-};
+{
+  services.akkoma.config.":pleroma".":media_preview_proxy" = {
+    enabled = true;
+    thumbnail_max_width = 1920;
+    thumbnail_max_height = 1080;
+  };
+}
 ```
 
 ## Frontend management {#modules-services-akkoma-frontend-management}
@@ -160,29 +170,31 @@ The following example overrides the primary frontend’s default configuration u
 derivation.
 
 ```nix
-services.akkoma.frontends.primary.package = pkgs.runCommand "akkoma-fe" {
-  config = builtins.toJSON {
-    expertLevel = 1;
-    collapseMessageWithSubject = false;
-    stopGifs = false;
-    replyVisibility = "following";
-    webPushHideIfCW = true;
-    hideScopeNotice = true;
-    renderMisskeyMarkdown = false;
-    hideSiteFavicon = true;
-    postContentType = "text/markdown";
-    showNavShortcuts = false;
-  };
-  nativeBuildInputs = with pkgs; [ jq xorg.lndir ];
-  passAsFile = [ "config" ];
-} ''
-  mkdir $out
-  lndir ${pkgs.akkoma-frontends.akkoma-fe} $out
-
-  rm $out/static/config.json
-  jq -s add ${pkgs.akkoma-frontends.akkoma-fe}/static/config.json ${config} \
-    >$out/static/config.json
-'';
+{
+  services.akkoma.frontends.primary.package = pkgs.runCommand "akkoma-fe" {
+    config = builtins.toJSON {
+      expertLevel = 1;
+      collapseMessageWithSubject = false;
+      stopGifs = false;
+      replyVisibility = "following";
+      webPushHideIfCW = true;
+      hideScopeNotice = true;
+      renderMisskeyMarkdown = false;
+      hideSiteFavicon = true;
+      postContentType = "text/markdown";
+      showNavShortcuts = false;
+    };
+    nativeBuildInputs = with pkgs; [ jq xorg.lndir ];
+    passAsFile = [ "config" ];
+  } ''
+    mkdir $out
+    lndir ${pkgs.akkoma-frontends.akkoma-fe} $out
+
+    rm $out/static/config.json
+    jq -s add ${pkgs.akkoma-frontends.akkoma-fe}/static/config.json ${config} \
+      >$out/static/config.json
+  '';
+}
 ```
 
 ## Federation policies {#modules-services-akkoma-federation-policies}
@@ -198,28 +210,30 @@ of the fediverse and providing a pleasant experience to the users of an instance
 
 
 ```nix
-services.akkoma.config.":pleroma" = with (pkgs.formats.elixirConf { }).lib; {
-  ":mrf".policies = map mkRaw [
-    "Pleroma.Web.ActivityPub.MRF.SimplePolicy"
-  ];
-
-  ":mrf_simple" = {
-    # Tag all media as sensitive
-    media_nsfw = mkMap {
-      "nsfw.weird.kinky" = "Untagged NSFW content";
-    };
-
-    # Reject all activities except deletes
-    reject = mkMap {
-      "kiwifarms.cc" = "Persistent harassment of users, no moderation";
-    };
-
-    # Force posts to be visible by followers only
-    followers_only = mkMap {
-      "beta.birdsite.live" = "Avoid polluting timelines with Twitter posts";
+{
+  services.akkoma.config.":pleroma" = with (pkgs.formats.elixirConf { }).lib; {
+    ":mrf".policies = map mkRaw [
+      "Pleroma.Web.ActivityPub.MRF.SimplePolicy"
+    ];
+
+    ":mrf_simple" = {
+      # Tag all media as sensitive
+      media_nsfw = mkMap {
+        "nsfw.weird.kinky" = "Untagged NSFW content";
+      };
+
+      # Reject all activities except deletes
+      reject = mkMap {
+        "kiwifarms.cc" = "Persistent harassment of users, no moderation";
+      };
+
+      # Force posts to be visible by followers only
+      followers_only = mkMap {
+        "beta.birdsite.live" = "Avoid polluting timelines with Twitter posts";
+      };
     };
   };
-};
+}
 ```
 
 ## Upload filters {#modules-services-akkoma-upload-filters}
@@ -228,12 +242,14 @@ This example strips GPS and location metadata from uploads, deduplicates them an
 the file name.
 
 ```nix
-services.akkoma.config.":pleroma"."Pleroma.Upload".filters =
-  map (pkgs.formats.elixirConf { }).lib.mkRaw [
-    "Pleroma.Upload.Filter.Exiftool"
-    "Pleroma.Upload.Filter.Dedupe"
-    "Pleroma.Upload.Filter.AnonymizeFilename"
-  ];
+{
+  services.akkoma.config.":pleroma"."Pleroma.Upload".filters =
+    map (pkgs.formats.elixirConf { }).lib.mkRaw [
+      "Pleroma.Upload.Filter.Exiftool"
+      "Pleroma.Upload.Filter.Dedupe"
+      "Pleroma.Upload.Filter.AnonymizeFilename"
+    ];
+}
 ```
 
 ## Migration from Pleroma {#modules-services-akkoma-migration-pleroma}
@@ -286,9 +302,11 @@ To re‐use the Pleroma data in place, disable Pleroma and enable Akkoma, pointi
 Pleroma database and upload directory.
 
 ```nix
-# Adjust these settings according to the database name and upload directory path used by Pleroma
-services.akkoma.config.":pleroma"."Pleroma.Repo".database = "pleroma";
-services.akkoma.config.":pleroma".":instance".upload_dir = "/var/lib/pleroma/uploads";
+{
+  # Adjust these settings according to the database name and upload directory path used by Pleroma
+  services.akkoma.config.":pleroma"."Pleroma.Repo".database = "pleroma";
+  services.akkoma.config.":pleroma".":instance".upload_dir = "/var/lib/pleroma/uploads";
+}
 ```
 
 Please keep in mind that after the Akkoma service has been started, any migrations applied by
@@ -304,7 +322,9 @@ details.
 The Akkoma systemd service may be confined to a chroot with
 
 ```nix
-services.systemd.akkoma.confinement.enable = true;
+{
+  systemd.services.akkoma.confinement.enable = true;
+}
 ```
 
 Confinement of services is not generally supported in NixOS and therefore disabled by default.
diff --git a/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md b/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md
index 236953bd4ff7..d8e59b3ad210 100644
--- a/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md
+++ b/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md
@@ -4,7 +4,7 @@ c2FmZQ is an application that can securely encrypt, store, and share files,
 including but not limited to pictures and videos.
 
 The service `c2fmzq-server` can be enabled by setting
-```
+```nix
 {
   services.c2fmzq-server.enable = true;
 }
@@ -17,7 +17,7 @@ In principle the server can be exposed directly on a public interface and there
 are command line options to manage HTTPS certificates directly, but the module
 is designed to be served behind a reverse proxy or only accessed via localhost.
 
-```
+```nix
 {
   services.c2fmzq-server = {
     enable = true;
diff --git a/nixpkgs/nixos/modules/services/web-apps/discourse.md b/nixpkgs/nixos/modules/services/web-apps/discourse.md
index 35180bea87d9..d4b9c93c4ead 100644
--- a/nixpkgs/nixos/modules/services/web-apps/discourse.md
+++ b/nixpkgs/nixos/modules/services/web-apps/discourse.md
@@ -6,20 +6,22 @@ modern and open source discussion platform.
 ## Basic usage {#module-services-discourse-basic-usage}
 
 A minimal configuration using Let's Encrypt for TLS certificates looks like this:
-```
-services.discourse = {
-  enable = true;
-  hostname = "discourse.example.com";
-  admin = {
-    email = "admin@example.com";
-    username = "admin";
-    fullName = "Administrator";
-    passwordFile = "/path/to/password_file";
+```nix
+{
+  services.discourse = {
+    enable = true;
+    hostname = "discourse.example.com";
+    admin = {
+      email = "admin@example.com";
+      username = "admin";
+      fullName = "Administrator";
+      passwordFile = "/path/to/password_file";
+    };
+    secretKeyBaseFile = "/path/to/secret_key_base_file";
   };
-  secretKeyBaseFile = "/path/to/secret_key_base_file";
-};
-security.acme.email = "me@example.com";
-security.acme.acceptTerms = true;
+  security.acme.email = "me@example.com";
+  security.acme.acceptTerms = true;
+}
 ```
 
 Provided a proper DNS setup, you'll be able to connect to the
@@ -34,20 +36,22 @@ the [](#opt-services.discourse.sslCertificate)
 and [](#opt-services.discourse.sslCertificateKey)
 options:
 
-```
-services.discourse = {
-  enable = true;
-  hostname = "discourse.example.com";
-  sslCertificate = "/path/to/ssl_certificate";
-  sslCertificateKey = "/path/to/ssl_certificate_key";
-  admin = {
-    email = "admin@example.com";
-    username = "admin";
-    fullName = "Administrator";
-    passwordFile = "/path/to/password_file";
+```nix
+{
+  services.discourse = {
+    enable = true;
+    hostname = "discourse.example.com";
+    sslCertificate = "/path/to/ssl_certificate";
+    sslCertificateKey = "/path/to/ssl_certificate_key";
+    admin = {
+      email = "admin@example.com";
+      username = "admin";
+      fullName = "Administrator";
+      passwordFile = "/path/to/password_file";
+    };
+    secretKeyBaseFile = "/path/to/secret_key_base_file";
   };
-  secretKeyBaseFile = "/path/to/secret_key_base_file";
-};
+}
 ```
 
 ## Database access {#module-services-discourse-database}
@@ -80,27 +84,29 @@ A basic setup which assumes you want to use your configured
 [hostname](#opt-services.discourse.hostname) as
 email domain can be done like this:
 
-```
-services.discourse = {
-  enable = true;
-  hostname = "discourse.example.com";
-  sslCertificate = "/path/to/ssl_certificate";
-  sslCertificateKey = "/path/to/ssl_certificate_key";
-  admin = {
-    email = "admin@example.com";
-    username = "admin";
-    fullName = "Administrator";
-    passwordFile = "/path/to/password_file";
-  };
-  mail.outgoing = {
-    serverAddress = "smtp.emailprovider.com";
-    port = 587;
-    username = "user@emailprovider.com";
-    passwordFile = "/path/to/smtp_password_file";
+```nix
+{
+  services.discourse = {
+    enable = true;
+    hostname = "discourse.example.com";
+    sslCertificate = "/path/to/ssl_certificate";
+    sslCertificateKey = "/path/to/ssl_certificate_key";
+    admin = {
+      email = "admin@example.com";
+      username = "admin";
+      fullName = "Administrator";
+      passwordFile = "/path/to/password_file";
+    };
+    mail.outgoing = {
+      serverAddress = "smtp.emailprovider.com";
+      port = 587;
+      username = "user@emailprovider.com";
+      passwordFile = "/path/to/smtp_password_file";
+    };
+    mail.incoming.enable = true;
+    secretKeyBaseFile = "/path/to/secret_key_base_file";
   };
-  mail.incoming.enable = true;
-  secretKeyBaseFile = "/path/to/secret_key_base_file";
-};
+}
 ```
 
 This assumes you have set up an MX record for the address you've
@@ -162,44 +168,46 @@ The following example sets the title and description of the
 Discourse instance and enables
 GitHub login in the site settings,
 and changes a few request limits in the backend settings:
-```
-services.discourse = {
-  enable = true;
-  hostname = "discourse.example.com";
-  sslCertificate = "/path/to/ssl_certificate";
-  sslCertificateKey = "/path/to/ssl_certificate_key";
-  admin = {
-    email = "admin@example.com";
-    username = "admin";
-    fullName = "Administrator";
-    passwordFile = "/path/to/password_file";
-  };
-  mail.outgoing = {
-    serverAddress = "smtp.emailprovider.com";
-    port = 587;
-    username = "user@emailprovider.com";
-    passwordFile = "/path/to/smtp_password_file";
-  };
-  mail.incoming.enable = true;
-  siteSettings = {
-    required = {
-      title = "My Cats";
-      site_description = "Discuss My Cats (and be nice plz)";
+```nix
+{
+  services.discourse = {
+    enable = true;
+    hostname = "discourse.example.com";
+    sslCertificate = "/path/to/ssl_certificate";
+    sslCertificateKey = "/path/to/ssl_certificate_key";
+    admin = {
+      email = "admin@example.com";
+      username = "admin";
+      fullName = "Administrator";
+      passwordFile = "/path/to/password_file";
     };
-    login = {
-      enable_github_logins = true;
-      github_client_id = "a2f6dfe838cb3206ce20";
-      github_client_secret._secret = /run/keys/discourse_github_client_secret;
+    mail.outgoing = {
+      serverAddress = "smtp.emailprovider.com";
+      port = 587;
+      username = "user@emailprovider.com";
+      passwordFile = "/path/to/smtp_password_file";
     };
+    mail.incoming.enable = true;
+    siteSettings = {
+      required = {
+        title = "My Cats";
+        site_description = "Discuss My Cats (and be nice plz)";
+      };
+      login = {
+        enable_github_logins = true;
+        github_client_id = "a2f6dfe838cb3206ce20";
+        github_client_secret._secret = /run/keys/discourse_github_client_secret;
+      };
+    };
+    backendSettings = {
+      max_reqs_per_ip_per_minute = 300;
+      max_reqs_per_ip_per_10_seconds = 60;
+      max_asset_reqs_per_ip_per_10_seconds = 250;
+      max_reqs_per_ip_mode = "warn+block";
+    };
+    secretKeyBaseFile = "/path/to/secret_key_base_file";
   };
-  backendSettings = {
-    max_reqs_per_ip_per_minute = 300;
-    max_reqs_per_ip_per_10_seconds = 60;
-    max_asset_reqs_per_ip_per_10_seconds = 250;
-    max_reqs_per_ip_mode = "warn+block";
-  };
-  secretKeyBaseFile = "/path/to/secret_key_base_file";
-};
+}
 ```
 
 In the resulting site settings file, the
@@ -253,34 +261,36 @@ and [discourse-solved](https://github.com/discourse/discourse-solved)
 plugins, and disable `discourse-spoiler-alert`
 by default:
 
-```
-services.discourse = {
-  enable = true;
-  hostname = "discourse.example.com";
-  sslCertificate = "/path/to/ssl_certificate";
-  sslCertificateKey = "/path/to/ssl_certificate_key";
-  admin = {
-    email = "admin@example.com";
-    username = "admin";
-    fullName = "Administrator";
-    passwordFile = "/path/to/password_file";
-  };
-  mail.outgoing = {
-    serverAddress = "smtp.emailprovider.com";
-    port = 587;
-    username = "user@emailprovider.com";
-    passwordFile = "/path/to/smtp_password_file";
-  };
-  mail.incoming.enable = true;
-  plugins = with config.services.discourse.package.plugins; [
-    discourse-spoiler-alert
-    discourse-solved
-  ];
-  siteSettings = {
-    plugins = {
-      spoiler_enabled = false;
+```nix
+{
+  services.discourse = {
+    enable = true;
+    hostname = "discourse.example.com";
+    sslCertificate = "/path/to/ssl_certificate";
+    sslCertificateKey = "/path/to/ssl_certificate_key";
+    admin = {
+      email = "admin@example.com";
+      username = "admin";
+      fullName = "Administrator";
+      passwordFile = "/path/to/password_file";
+    };
+    mail.outgoing = {
+      serverAddress = "smtp.emailprovider.com";
+      port = 587;
+      username = "user@emailprovider.com";
+      passwordFile = "/path/to/smtp_password_file";
+    };
+    mail.incoming.enable = true;
+    plugins = with config.services.discourse.package.plugins; [
+      discourse-spoiler-alert
+      discourse-solved
+    ];
+    siteSettings = {
+      plugins = {
+        spoiler_enabled = false;
+      };
     };
+    secretKeyBaseFile = "/path/to/secret_key_base_file";
   };
-  secretKeyBaseFile = "/path/to/secret_key_base_file";
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/web-apps/gotosocial.md b/nixpkgs/nixos/modules/services/web-apps/gotosocial.md
index a290d7d1893a..b3540f0d5811 100644
--- a/nixpkgs/nixos/modules/services/web-apps/gotosocial.md
+++ b/nixpkgs/nixos/modules/services/web-apps/gotosocial.md
@@ -8,17 +8,19 @@ The following configuration sets up the PostgreSQL as database backend and binds
 GoToSocial to `127.0.0.1:8080`, expecting to be run behind a HTTP proxy on `gotosocial.example.com`.
 
 ```nix
-services.gotosocial = {
-  enable = true;
-  setupPostgresqlDB = true;
-  settings = {
-    application-name = "My GoToSocial";
-    host = "gotosocial.example.com";
-    protocol = "https";
-    bind-address = "127.0.0.1";
-    port = 8080;
+{
+  services.gotosocial = {
+    enable = true;
+    setupPostgresqlDB = true;
+    settings = {
+      application-name = "My GoToSocial";
+      host = "gotosocial.example.com";
+      protocol = "https";
+      bind-address = "127.0.0.1";
+      port = 8080;
+    };
   };
-};
+}
 ```
 
 Please refer to the [GoToSocial Documentation](https://docs.gotosocial.org/en/latest/configuration/general/)
@@ -30,24 +32,26 @@ Although it is possible to expose GoToSocial directly, it is common practice to
 HTTP reverse proxy such as nginx.
 
 ```nix
-networking.firewall.allowedTCPPorts = [ 80 443 ];
-services.nginx = {
-  enable = true;
-  clientMaxBodySize = "40M";
-  virtualHosts = with config.services.gotosocial.settings; {
-    "${host}" = {
-      enableACME = true;
-      forceSSL = true;
-      locations = {
-        "/" = {
-          recommendedProxySettings = true;
-          proxyWebsockets = true;
-          proxyPass = "http://${bind-address}:${toString port}";
+{
+  networking.firewall.allowedTCPPorts = [ 80 443 ];
+  services.nginx = {
+    enable = true;
+    clientMaxBodySize = "40M";
+    virtualHosts = with config.services.gotosocial.settings; {
+      "${host}" = {
+        enableACME = true;
+        forceSSL = true;
+        locations = {
+          "/" = {
+            recommendedProxySettings = true;
+            proxyWebsockets = true;
+            proxyPass = "http://${bind-address}:${toString port}";
+          };
         };
       };
     };
   };
-};
+}
 ```
 
 Please refer to [](#module-security-acme) for details on how to provision an SSL/TLS certificate.
diff --git a/nixpkgs/nixos/modules/services/web-apps/grocy.md b/nixpkgs/nixos/modules/services/web-apps/grocy.md
index 62aad4b103df..f4b5769c2479 100644
--- a/nixpkgs/nixos/modules/services/web-apps/grocy.md
+++ b/nixpkgs/nixos/modules/services/web-apps/grocy.md
@@ -6,7 +6,7 @@
 ## Basic usage {#module-services-grocy-basic-usage}
 
 A very basic configuration may look like this:
-```
+```nix
 { pkgs, ... }:
 {
   services.grocy = {
@@ -29,7 +29,7 @@ of the application.
 
 The configuration for `grocy` is located at `/etc/grocy/config.php`.
 By default, the following settings can be defined in the NixOS-configuration:
-```
+```nix
 { pkgs, ... }:
 {
   services.grocy.settings = {
@@ -56,7 +56,7 @@ By default, the following settings can be defined in the NixOS-configuration:
 
 If you want to alter the configuration file on your own, you can do this manually with
 an expression like this:
-```
+```nix
 { lib, ... }:
 {
   environment.etc."grocy/config.php".text = lib.mkAfter ''
diff --git a/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md b/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md
index 060ef9752650..577f82e315be 100644
--- a/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md
+++ b/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md
@@ -6,7 +6,7 @@ private, self-hosted video conferencing solution.
 ## Basic usage {#module-services-jitsi-basic-usage}
 
 A minimal configuration using Let's Encrypt for TLS certificates looks like this:
-```
+```nix
 {
   services.jitsi-meet = {
     enable = true;
@@ -22,7 +22,7 @@ A minimal configuration using Let's Encrypt for TLS certificates looks like this
 ## Configuration {#module-services-jitsi-configuration}
 
 Here is the minimal configuration with additional configurations:
-```
+```nix
 {
   services.jitsi-meet = {
     enable = true;
diff --git a/nixpkgs/nixos/modules/services/web-apps/kavita.nix b/nixpkgs/nixos/modules/services/web-apps/kavita.nix
index c3e39f0b5476..c90697bcfa8b 100644
--- a/nixpkgs/nixos/modules/services/web-apps/kavita.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/kavita.nix
@@ -2,7 +2,18 @@
 
 let
   cfg = config.services.kavita;
-in {
+  settingsFormat = pkgs.formats.json { };
+  appsettings = settingsFormat.generate "appsettings.json" ({ TokenKey = "@TOKEN@"; } // cfg.settings);
+in
+{
+  imports = [
+    (lib.mkChangedOptionModule [ "services" "kavita" "ipAdresses" ] [ "services" "kavita" "settings" "IpAddresses" ] (config:
+      let value = lib.getAttrFromPath [ "services" "kavita" "ipAdresses" ] config; in
+      lib.concatStringsSep "," value
+    ))
+    (lib.mkRenamedOptionModule [ "services" "kavita" "port" ] [ "services" "kavita" "settings" "Port" ])
+  ];
+
   options.services.kavita = {
     enable = lib.mkEnableOption (lib.mdDoc "Kavita reading server");
 
@@ -27,16 +38,31 @@ in {
         It can be generated with `head -c 32 /dev/urandom | base64`.
       '';
     };
-    port = lib.mkOption {
-      default = 5000;
-      type = lib.types.port;
-      description = lib.mdDoc "Port to bind to.";
-    };
-    ipAdresses = lib.mkOption {
-      default = ["0.0.0.0" "::"];
-      type = lib.types.listOf lib.types.str;
-      description = lib.mdDoc "IP Addresses to bind to. The default is to bind
-      to all IPv4 and IPv6 addresses.";
+
+    settings = lib.mkOption {
+      default = { };
+      description = lib.mdDoc ''
+        Kavita configuration options, as configured in {file}`appsettings.json`.
+      '';
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+
+        options = {
+          Port = lib.mkOption {
+            default = 5000;
+            type = lib.types.port;
+            description = lib.mdDoc "Port to bind to.";
+          };
+
+          IpAddresses = lib.mkOption {
+            default = "0.0.0.0,::";
+            type = lib.types.commas;
+            description = lib.mdDoc ''
+              IP Addresses to bind to. The default is to bind to all IPv4 and IPv6 addresses.
+            '';
+          };
+        };
+      };
     };
   };
 
@@ -46,18 +72,15 @@ in {
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
       preStart = ''
-        umask u=rwx,g=rx,o=
-        cat > "${cfg.dataDir}/config/appsettings.json" <<EOF
-        {
-          "TokenKey": "$(cat ${cfg.tokenKeyFile})",
-          "Port": ${toString cfg.port},
-          "IpAddresses": "${lib.concatStringsSep "," cfg.ipAdresses}"
-        }
-        EOF
+        install -m600 ${appsettings} ${lib.escapeShellArg cfg.dataDir}/config/appsettings.json
+        ${pkgs.replace-secret}/bin/replace-secret '@TOKEN@' \
+          ''${CREDENTIALS_DIRECTORY}/token \
+          '${cfg.dataDir}/config/appsettings.json'
       '';
       serviceConfig = {
         WorkingDirectory = cfg.dataDir;
-        ExecStart = "${lib.getExe cfg.package}";
+        LoadCredential = [ "token:${cfg.tokenKeyFile}" ];
+        ExecStart = lib.getExe cfg.package;
         Restart = "always";
         User = cfg.user;
       };
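For reference, a minimal sketch of how the reworked kavita module above could be consumed; the paths and addresses are placeholders, not part of this change, and the option names follow the rename/changed-option mappings shown in the hunk:

```nix
{
  services.kavita = {
    enable = true;
    # Placeholder path; the key can be generated with `head -c 32 /dev/urandom | base64`.
    tokenKeyFile = "/run/keys/kavita-token";
    settings = {
      Port = 5000;                    # formerly services.kavita.port
      IpAddresses = "127.0.0.1,::1";  # formerly services.kavita.ipAdresses (list, now comma-separated)
    };
  };
}
```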
diff --git a/nixpkgs/nixos/modules/services/web-apps/keycloak.md b/nixpkgs/nixos/modules/services/web-apps/keycloak.md
index aa8de40d642b..020bee400348 100644
--- a/nixpkgs/nixos/modules/services/web-apps/keycloak.md
+++ b/nixpkgs/nixos/modules/services/web-apps/keycloak.md
@@ -126,16 +126,18 @@ should be set to. See the description of
 ## Example configuration {#module-services-keycloak-example-config}
 
 A basic configuration with some custom settings could look like this:
-```
-services.keycloak = {
-  enable = true;
-  settings = {
-    hostname = "keycloak.example.com";
-    hostname-strict-backchannel = true;
+```nix
+{
+  services.keycloak = {
+    enable = true;
+    settings = {
+      hostname = "keycloak.example.com";
+      hostname-strict-backchannel = true;
+    };
+    initialAdminPassword = "e6Wcm0RrtegMEHl";  # change on first login
+    sslCertificate = "/run/keys/ssl_cert";
+    sslCertificateKey = "/run/keys/ssl_key";
+    database.passwordFile = "/run/keys/db_password";
   };
-  initialAdminPassword = "e6Wcm0RrtegMEHl";  # change on first login
-  sslCertificate = "/run/keys/ssl_cert";
-  sslCertificateKey = "/run/keys/ssl_key";
-  database.passwordFile = "/run/keys/db_password";
-};
+}
 ```
diff --git a/nixpkgs/nixos/modules/services/web-apps/lemmy.md b/nixpkgs/nixos/modules/services/web-apps/lemmy.md
index faafe096d138..0ed23607d00b 100644
--- a/nixpkgs/nixos/modules/services/web-apps/lemmy.md
+++ b/nixpkgs/nixos/modules/services/web-apps/lemmy.md
@@ -7,13 +7,15 @@ Lemmy is a federated alternative to reddit in rust.
 the minimum to start lemmy is
 
 ```nix
-services.lemmy = {
-  enable = true;
-  settings = {
-    hostname = "lemmy.union.rocks";
-    database.createLocally = true;
+{
+  services.lemmy = {
+    enable = true;
+    settings = {
+      hostname = "lemmy.union.rocks";
+      database.createLocally = true;
+    };
+    caddy.enable = true;
   };
-  caddy.enable = true;
 }
 ```
 
diff --git a/nixpkgs/nixos/modules/services/web-apps/nextcloud.md b/nixpkgs/nixos/modules/services/web-apps/nextcloud.md
index 5db83d7e4463..06a8712b0b8a 100644
--- a/nixpkgs/nixos/modules/services/web-apps/nextcloud.md
+++ b/nixpkgs/nixos/modules/services/web-apps/nextcloud.md
@@ -25,7 +25,7 @@ to `true`, Nextcloud will automatically be configured to connect to it through
 socket.
 
 A very basic configuration may look like this:
-```
+```nix
 { pkgs, ... }:
 {
   services.nextcloud = {
@@ -130,7 +130,7 @@ settings `listen.owner` &amp; `listen.group` in the
 [corresponding `phpfpm` pool](#opt-services.phpfpm.pools).
 
 An exemplary configuration may look like this:
-```
+```nix
 { config, lib, pkgs, ... }: {
   services.nginx.enable = false;
   services.nextcloud = {
@@ -205,7 +205,7 @@ If major-releases will be abandoned by upstream, we should check first if those
 in NixOS for a safe upgrade-path before removing those. In that case we should keep those
 packages, but mark them as insecure in an expression like this (in
 `<nixpkgs/pkgs/servers/nextcloud/default.nix>`):
-```
+```nix
 /* ... */
 {
   nextcloud17 = generic {
diff --git a/nixpkgs/nixos/modules/services/web-apps/peertube.nix b/nixpkgs/nixos/modules/services/web-apps/peertube.nix
index 39c02c81c423..76f869913592 100644
--- a/nixpkgs/nixos/modules/services/web-apps/peertube.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/peertube.nix
@@ -61,18 +61,16 @@ let
     eval -- "\$@"
   '';
 
-  peertubeCli = pkgs.writeShellScriptBin "peertube" ''
-    node ~/dist/server/tools/peertube.js $@
+  nginxCommonHeaders = lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.forceSSL ''
+    add_header Strict-Transport-Security 'max-age=31536000';
+  '' + lib.optionalString (config.services.nginx.virtualHosts.${cfg.localDomain}.quic && config.services.nginx.virtualHosts.${cfg.localDomain}.http3) ''
+    add_header Alt-Svc 'h3=":$server_port"; ma=604800';
   '';
 
-  nginxCommonHeaders = lib.optionalString cfg.enableWebHttps ''
-    add_header Strict-Transport-Security      'max-age=63072000; includeSubDomains';
-  '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
-    add_header Alt-Svc                        'h3=":443"; ma=86400';
-  '' + ''
-    add_header Access-Control-Allow-Origin    '*';
-    add_header Access-Control-Allow-Methods   'GET, OPTIONS';
-    add_header Access-Control-Allow-Headers   'Range,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
+  nginxCommonHeadersExtra = ''
+    add_header Access-Control-Allow-Origin '*';
+    add_header Access-Control-Allow-Methods 'GET, OPTIONS';
+    add_header Access-Control-Allow-Headers 'Range,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
   '';
 
 in {
@@ -330,6 +328,8 @@ in {
       }
     ];
 
+    environment.systemPackages = [ cfg.package.cli ];
+
     services.peertube.settings = lib.mkMerge [
       {
         listen = {
@@ -355,12 +355,13 @@ in {
           tmp_persistent = lib.mkDefault "/var/lib/peertube/storage/tmp_persistent/";
           bin = lib.mkDefault "/var/lib/peertube/storage/bin/";
           avatars = lib.mkDefault "/var/lib/peertube/storage/avatars/";
-          videos = lib.mkDefault "/var/lib/peertube/storage/videos/";
+          web_videos = lib.mkDefault "/var/lib/peertube/storage/web-videos/";
           streaming_playlists = lib.mkDefault "/var/lib/peertube/storage/streaming-playlists/";
           redundancy = lib.mkDefault "/var/lib/peertube/storage/redundancy/";
           logs = lib.mkDefault "/var/lib/peertube/storage/logs/";
           previews = lib.mkDefault "/var/lib/peertube/storage/previews/";
           thumbnails = lib.mkDefault "/var/lib/peertube/storage/thumbnails/";
+          storyboards = lib.mkDefault "/var/lib/peertube/storage/storyboards/";
           torrents = lib.mkDefault "/var/lib/peertube/storage/torrents/";
           captions = lib.mkDefault "/var/lib/peertube/storage/captions/";
           cache = lib.mkDefault "/var/lib/peertube/storage/cache/";
@@ -428,7 +429,7 @@ in {
 
       environment = env;
 
-      path = with pkgs; [ bashInteractive ffmpeg nodejs_18 openssl yarn python3 ];
+      path = with pkgs; [ nodejs_18 yarn ffmpeg-headless openssl ];
 
       script = ''
         #!/bin/sh
@@ -456,7 +457,7 @@ in {
         ln -sf ${cfg.package}/config/default.yaml /var/lib/peertube/config/default.yaml
         ln -sf ${cfg.package}/client/dist -T /var/lib/peertube/www/client
         ln -sf ${cfg.settings.storage.client_overrides} -T /var/lib/peertube/www/client-overrides
-        npm start
+        node dist/server
       '';
       serviceConfig = {
         Type = "simple";
@@ -488,6 +489,9 @@ in {
 
     services.nginx = lib.mkIf cfg.configureNginx {
       enable = true;
+      upstreams."peertube".servers = {
+        "127.0.0.1:${toString cfg.listenHttp}".fail_timeout = "0";
+      };
       virtualHosts."${cfg.localDomain}" = {
         root = "/var/lib/peertube/www";
 
@@ -497,14 +501,14 @@ in {
           priority = 1110;
         };
 
-        locations."= /api/v1/videos/upload-resumable" = {
+        locations."~ ^/api/v1/videos/(upload-resumable|([^/]+/source/replace-resumable))$" = {
           tryFiles = "/dev/null @api";
           priority = 1120;
 
           extraConfig = ''
-            client_max_body_size                        0;
-            proxy_request_buffering                     off;
-          '';
+            client_max_body_size 0;
+            proxy_request_buffering off;
+          '' + nginxCommonHeaders;
         };
 
         locations."~ ^/api/v1/videos/(upload|([^/]+/studio/edit))$" = {
@@ -513,13 +517,11 @@ in {
           priority = 1130;
 
           extraConfig = ''
-            client_max_body_size                        12G;
-            add_header X-File-Maximum-Size              8G always;
-          '' + lib.optionalString cfg.enableWebHttps ''
-            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
-          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
-            add_header Alt-Svc                          'h3=":443"; ma=86400';
-          '';
+            limit_except POST HEAD { deny all; }
+
+            client_max_body_size 12G;
+            add_header X-File-Maximum-Size 8G always;
+          '' + nginxCommonHeaders;
         };
 
         locations."~ ^/api/v1/runners/jobs/[^/]+/(update|success)$" = {
@@ -528,13 +530,9 @@ in {
           priority = 1135;
 
           extraConfig = ''
-            client_max_body_size                        12G;
-            add_header X-File-Maximum-Size              8G always;
-          '' + lib.optionalString cfg.enableWebHttps ''
-            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
-          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
-            add_header Alt-Svc                          'h3=":443"; ma=86400';
-          '';
+            client_max_body_size 12G;
+            add_header X-File-Maximum-Size 8G always;
+          '' + nginxCommonHeaders;
         };
 
         locations."~ ^/api/v1/(videos|video-playlists|video-channels|users/me)" = {
@@ -542,32 +540,28 @@ in {
           priority = 1140;
 
           extraConfig = ''
-            client_max_body_size                        6M;
-            add_header X-File-Maximum-Size              4M always;
-          '' + lib.optionalString cfg.enableWebHttps ''
-            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
-          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
-            add_header Alt-Svc                          'h3=":443"; ma=86400';
-          '';
+            client_max_body_size 6M;
+            add_header X-File-Maximum-Size 4M always;
+          '' + nginxCommonHeaders;
         };
 
         locations."@api" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          proxyPass = "http://peertube";
           priority = 1150;
 
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
-            proxy_connect_timeout                       10m;
+            proxy_connect_timeout 10m;
 
-            proxy_send_timeout                          10m;
-            proxy_read_timeout                          10m;
+            proxy_send_timeout 10m;
+            proxy_read_timeout 10m;
 
-            client_max_body_size                        100k;
-            send_timeout                                10m;
-          '';
+            client_max_body_size 100k;
+            send_timeout 10m;
+          '' + nginxCommonHeaders;
         };
 
         # Websocket
@@ -581,7 +575,7 @@ in {
           priority = 1220;
 
           extraConfig = ''
-            proxy_read_timeout                          15m;
+            proxy_read_timeout 15m;
           '';
         };
 
@@ -591,84 +585,82 @@ in {
         };
 
         locations."@api_websocket" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          proxyPass = "http://peertube";
           priority = 1240;
 
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
-            proxy_set_header Upgrade                    $http_upgrade;
-            proxy_set_header Connection                 'upgrade';
-
-            proxy_http_version                          1.1;
-          '';
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection 'upgrade';
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
+          '' + nginxCommonHeaders;
         };
 
         # Bypass PeerTube for performance reasons.
         locations."~ ^/client/(assets/images/(icons/icon-36x36\.png|icons/icon-48x48\.png|icons/icon-72x72\.png|icons/icon-96x96\.png|icons/icon-144x144\.png|icons/icon-192x192\.png|icons/icon-512x512\.png|logo\.svg|favicon\.png|default-playlist\.jpg|default-avatar-account\.png|default-avatar-account-48x48\.png|default-avatar-video-channel\.png|default-avatar-video-channel-48x48\.png))$" = {
           tryFiles = "/client-overrides/$1 /client/$1 $1";
           priority = 1310;
+
+          extraConfig = nginxCommonHeaders;
         };
 
         locations."~ ^/client/(.*\.(js|css|png|svg|woff2|otf|ttf|woff|eot))$" = {
           alias = "${cfg.package}/client/dist/$1";
           priority = 1320;
           extraConfig = ''
-            add_header Cache-Control                    'public, max-age=604800, immutable';
-          '' + lib.optionalString cfg.enableWebHttps ''
-            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
-          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
-            add_header Alt-Svc                          'h3=":443"; ma=86400';
-          '';
+            add_header Cache-Control 'public, max-age=604800, immutable';
+          '' + nginxCommonHeaders;
         };
 
         locations."^~ /download/" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          proxyPass = "http://peertube";
           priority = 1410;
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
-            proxy_limit_rate                            5M;
-          '';
+            proxy_limit_rate 5M;
+          '' + nginxCommonHeaders;
         };
 
-        locations."^~ /static/streaming-playlists/private/" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+        locations."^~ /static/streaming-playlists/hls/private/" = {
+          proxyPass = "http://peertube";
           priority = 1420;
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
-            proxy_limit_rate                            5M;
-          '';
+            proxy_limit_rate 5M;
+          '' + nginxCommonHeaders;
         };
 
         locations."^~ /static/web-videos/private/" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          proxyPass = "http://peertube";
           priority = 1430;
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
-            proxy_limit_rate                            5M;
-          '';
+            proxy_limit_rate 5M;
+          '' + nginxCommonHeaders;
         };
 
         locations."^~ /static/webseed/private/" = {
-          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          proxyPass = "http://peertube";
           priority = 1440;
           extraConfig = ''
-            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
-            proxy_set_header Host                       $host;
-            proxy_set_header X-Real-IP                  $remote_addr;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
-            proxy_limit_rate                            5M;
-          '';
+            proxy_limit_rate 5M;
+          '' + nginxCommonHeaders;
         };
 
         locations."^~ /static/redundancy/" = {
@@ -676,33 +668,35 @@ in {
           root = cfg.settings.storage.redundancy;
           priority = 1450;
           extraConfig = ''
-            set $peertube_limit_rate                    800k;
+            set $peertube_limit_rate 800k;
 
             if ($request_uri ~ -fragmented.mp4$) {
-              set $peertube_limit_rate                  5M;
+              set $peertube_limit_rate 5M;
             }
 
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
-              add_header Access-Control-Max-Age         1728000;
-              add_header Content-Type                   'text/plain charset=UTF-8';
-              add_header Content-Length                 0;
-              return                                    204;
+              ${nginxCommonHeadersExtra}
+              add_header Access-Control-Max-Age 1728000;
+              add_header Content-Type 'text/plain charset=UTF-8';
+              add_header Content-Length 0;
+              return 204;
             }
             if ($request_method = 'GET') {
               ${nginxCommonHeaders}
+              ${nginxCommonHeadersExtra}
 
-              access_log                                off;
+              access_log off;
             }
 
-            aio                                         threads;
-            sendfile                                    on;
-            sendfile_max_chunk                          1M;
+            aio threads;
+            sendfile on;
+            sendfile_max_chunk 1M;
 
-            limit_rate                                  $peertube_limit_rate;
-            limit_rate_after                            5M;
+            limit_rate $peertube_limit_rate;
+            limit_rate_after 5M;
 
-            rewrite ^/static/redundancy/(.*)$           /$1 break;
+            rewrite ^/static/redundancy/(.*)$ /$1 break;
           '';
         };
 
@@ -711,109 +705,111 @@ in {
           root = cfg.settings.storage.streaming_playlists;
           priority = 1460;
           extraConfig = ''
-            set $peertube_limit_rate                    800k;
+            set $peertube_limit_rate 800k;
 
             if ($request_uri ~ -fragmented.mp4$) {
-              set $peertube_limit_rate                  5M;
+              set $peertube_limit_rate 5M;
             }
 
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
-              add_header Access-Control-Max-Age         1728000;
-              add_header Content-Type                   'text/plain charset=UTF-8';
-              add_header Content-Length                 0;
-              return                                    204;
+              ${nginxCommonHeadersExtra}
+              add_header Access-Control-Max-Age 1728000;
+              add_header Content-Type 'text/plain charset=UTF-8';
+              add_header Content-Length 0;
+              return 204;
             }
             if ($request_method = 'GET') {
               ${nginxCommonHeaders}
+              ${nginxCommonHeadersExtra}
 
-              access_log                                off;
+              access_log off;
             }
 
-            aio                                         threads;
-            sendfile                                    on;
-            sendfile_max_chunk                          1M;
+            aio threads;
+            sendfile on;
+            sendfile_max_chunk 1M;
 
-            limit_rate                                  $peertube_limit_rate;
-            limit_rate_after                            5M;
+            limit_rate $peertube_limit_rate;
+            limit_rate_after 5M;
 
-            rewrite ^/static/streaming-playlists/(.*)$  /$1 break;
+            rewrite ^/static/streaming-playlists/(.*)$ /$1 break;
           '';
         };
 
         locations."^~ /static/web-videos/" = {
           tryFiles = "$uri @api";
-          root = cfg.settings.storage.streaming_playlists;
+          root = cfg.settings.storage.web_videos;
           priority = 1470;
           extraConfig = ''
-            set $peertube_limit_rate                    800k;
+            set $peertube_limit_rate 800k;
 
             if ($request_uri ~ -fragmented.mp4$) {
-              set $peertube_limit_rate                  5M;
+              set $peertube_limit_rate 5M;
             }
 
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
-              add_header Access-Control-Max-Age         1728000;
-              add_header Content-Type                   'text/plain charset=UTF-8';
-              add_header Content-Length                 0;
-              return                                    204;
+              ${nginxCommonHeadersExtra}
+              add_header Access-Control-Max-Age 1728000;
+              add_header Content-Type 'text/plain charset=UTF-8';
+              add_header Content-Length 0;
+              return 204;
             }
             if ($request_method = 'GET') {
               ${nginxCommonHeaders}
+              ${nginxCommonHeadersExtra}
 
-              access_log                                off;
+              access_log off;
             }
 
-            aio                                         threads;
-            sendfile                                    on;
-            sendfile_max_chunk                          1M;
+            aio threads;
+            sendfile on;
+            sendfile_max_chunk 1M;
 
-            limit_rate                                  $peertube_limit_rate;
-            limit_rate_after                            5M;
+            limit_rate $peertube_limit_rate;
+            limit_rate_after 5M;
 
-            rewrite ^/static/streaming-playlists/(.*)$  /$1 break;
+            rewrite ^/static/web-videos/(.*)$ /$1 break;
           '';
         };
 
         locations."^~ /static/webseed/" = {
           tryFiles = "$uri @api";
-          root = cfg.settings.storage.videos;
+          root = cfg.settings.storage.web_videos;
           priority = 1480;
           extraConfig = ''
-            set $peertube_limit_rate                    800k;
+            set $peertube_limit_rate 800k;
 
             if ($request_uri ~ -fragmented.mp4$) {
-              set $peertube_limit_rate                  5M;
+              set $peertube_limit_rate 5M;
             }
 
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
-              add_header Access-Control-Max-Age         1728000;
-              add_header Content-Type                   'text/plain charset=UTF-8';
-              add_header Content-Length                 0;
-              return                                    204;
+              ${nginxCommonHeadersExtra}
+              add_header Access-Control-Max-Age 1728000;
+              add_header Content-Type 'text/plain charset=UTF-8';
+              add_header Content-Length 0;
+              return 204;
             }
             if ($request_method = 'GET') {
               ${nginxCommonHeaders}
+              ${nginxCommonHeadersExtra}
 
-              access_log                                off;
+              access_log off;
             }
 
-            aio                                         threads;
-            sendfile                                    on;
-            sendfile_max_chunk                          1M;
+            aio threads;
+            sendfile on;
+            sendfile_max_chunk 1M;
 
-            limit_rate                                  $peertube_limit_rate;
-            limit_rate_after                            5M;
+            limit_rate $peertube_limit_rate;
+            limit_rate_after 5M;
 
-            rewrite ^/static/webseed/(.*)$              /$1 break;
+            rewrite ^/static/webseed/(.*)$ /web-videos/$1 break;
           '';
         };
-
-        extraConfig = lib.optionalString cfg.enableWebHttps ''
-          add_header Strict-Transport-Security          'max-age=63072000; includeSubDomains';
-        '';
       };
     };
 
@@ -848,7 +844,7 @@ in {
           home = cfg.package;
         };
       })
-      (lib.attrsets.setAttrByPath [ cfg.user "packages" ] [ cfg.package peertubeEnv peertubeCli pkgs.ffmpeg pkgs.nodejs_18 pkgs.yarn ])
+      (lib.attrsets.setAttrByPath [ cfg.user "packages" ] [ peertubeEnv pkgs.nodejs_18 pkgs.yarn pkgs.ffmpeg-headless ])
       (lib.mkIf cfg.redis.enableUnixSocket {${config.services.peertube.user}.extraGroups = [ "redis-peertube" ];})
     ];
 
diff --git a/nixpkgs/nixos/modules/services/web-apps/pict-rs.md b/nixpkgs/nixos/modules/services/web-apps/pict-rs.md
index 2fa6bb3aebce..56c51e0d7259 100644
--- a/nixpkgs/nixos/modules/services/web-apps/pict-rs.md
+++ b/nixpkgs/nixos/modules/services/web-apps/pict-rs.md
@@ -7,7 +7,9 @@ pict-rs is a  a simple image hosting service.
 the minimum to start pict-rs is
 
 ```nix
-services.pict-rs.enable = true;
+{
+  services.pict-rs.enable = true;
+}
 ```
 
 this will start the http server on port 8080 by default.
diff --git a/nixpkgs/nixos/modules/services/web-apps/plausible.md b/nixpkgs/nixos/modules/services/web-apps/plausible.md
index 1328ce69441a..d3673eabddd4 100644
--- a/nixpkgs/nixos/modules/services/web-apps/plausible.md
+++ b/nixpkgs/nixos/modules/services/web-apps/plausible.md
@@ -11,7 +11,7 @@ $ openssl rand -base64 64
 ```
 
 After that, `plausible` can be deployed like this:
-```
+```nix
 {
   services.plausible = {
     enable = true;
diff --git a/nixpkgs/nixos/modules/services/web-apps/pretix.nix b/nixpkgs/nixos/modules/services/web-apps/pretix.nix
index 2355f8c450a1..22ee9769aa92 100644
--- a/nixpkgs/nixos/modules/services/web-apps/pretix.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/pretix.nix
@@ -63,7 +63,7 @@ in
   };
 
   options.services.pretix = {
-    enable = mkEnableOption "pretix";
+    enable = mkEnableOption "Pretix, a ticket shop application for conferences, festivals, concerts, etc.";
 
     package = mkPackageOption pkgs "pretix" { };
 
diff --git a/nixpkgs/nixos/modules/services/web-apps/slskd.nix b/nixpkgs/nixos/modules/services/web-apps/slskd.nix
index 580f66ec3ac9..15a5fd1177ad 100644
--- a/nixpkgs/nixos/modules/services/web-apps/slskd.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/slskd.nix
@@ -2,120 +2,248 @@
 
 let
   settingsFormat = pkgs.formats.yaml {};
+  defaultUser = "slskd";
 in {
   options.services.slskd = with lib; with types; {
     enable = mkEnableOption "enable slskd";
 
-    rotateLogs = mkEnableOption "enable an unit and timer that will rotate logs in /var/slskd/logs";
+    package = mkPackageOptionMD pkgs "slskd" { };
 
-    package = mkPackageOption pkgs "slskd" { };
+    user = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = "User account under which slskd runs.";
+    };
 
-    nginx = mkOption {
-      description = lib.mdDoc "options for nginx";
-      example = {
-        enable = true;
-        domain = "example.com";
-        contextPath = "/slskd";
-      };
-      type = submodule ({name, config, ...}: {
-        options = {
-          enable = mkEnableOption "enable nginx as a reverse proxy";
+    group = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = "Group under which slskd runs.";
+    };
 
-          domainName = mkOption {
-            type = str;
-            description = "Domain you want to use";
-          };
-          contextPath = mkOption {
-            type = types.path;
-            default = "/";
-            description = lib.mdDoc ''
-              The context path, i.e., the last part of the slskd
-              URL. Typically '/' or '/slskd'. Default '/'
-            '';
-          };
-        };
-      });
+    domain = mkOption {
+      type = types.nullOr types.str;
+      description = ''
+        If non-null, enables an nginx reverse proxy virtual host at this FQDN,
+        at the path configured with `services.slskd.web.url_base`.
+      '';
+      example = "slskd.example.com";
+    };
+
+    nginx = mkOption {
+      type = types.submodule (import ../web-servers/nginx/vhost-options.nix { inherit config lib; });
+      default = {};
+      example = lib.literalExpression ''
+        {
+          enableACME = true;
+          forceSSL = true;
+        }
+      '';
+      description = ''
+        This option customizes the nginx virtual host set up for slskd.
+      '';
     };
 
     environmentFile = mkOption {
       type = path;
       description = ''
-        Path to a file containing secrets.
-        It must at least contain the variable `SLSKD_SLSK_PASSWORD`
+        Path to the environment file sourced on startup.
+        It must at least contain the variables `SLSKD_SLSK_USERNAME` and `SLSKD_SLSK_PASSWORD`.
+        Web interface credentials should also be set here in `SLSKD_USERNAME` and `SLSKD_PASSWORD`.
+        Other, optional credentials like SOCKS5 with `SLSKD_SLSK_PROXY_USERNAME` and `SLSKD_SLSK_PROXY_PASSWORD`
+        should all reside here instead of in the world-readable nix store.
+        Variables are documented at https://github.com/slskd/slskd/blob/master/docs/config.md
       '';
     };
 
     openFirewall = mkOption {
       type = bool;
-      description = ''
-        Whether to open the firewall for services.slskd.settings.listen_port";
-      '';
+      description = "Whether to open the firewall for the soulseek network listen port (not the web interface port).";
       default = false;
     };
 
     settings = mkOption {
-      description = lib.mdDoc ''
-        Configuration for slskd, see
-        [available options](https://github.com/slskd/slskd/blob/master/docs/config.md)
-        `APP_DIR` is set to /var/lib/slskd, where default download & incomplete directories,
-        log and databases will be created.
+      description = ''
+        Application configuration for slskd. See
+        [documentation](https://github.com/slskd/slskd/blob/master/docs/config.md).
       '';
       default = {};
       type = submodule {
         freeformType = settingsFormat.type;
         options = {
+          remote_file_management = mkEnableOption "modification of share contents through the web ui";
+
+          flags = {
+            force_share_scan = mkOption {
+              type = bool;
+              description = "Force a rescan of shares on every startup.";
+            };
+            no_version_check = mkOption {
+              type = bool;
+              default = true;
+              visible = false;
+              description = "Don't perform a version check on startup.";
+            };
+          };
+
+          directories = {
+            incomplete = mkOption {
+              type = nullOr path;
+              description = "Directory where incomplete downloading files are stored.";
+              defaultText = "/var/lib/slskd/incomplete";
+              default = null;
+            };
+            downloads = mkOption {
+              type = nullOr path;
+              description = "Directory where downloaded files are stored.";
+              defaultText = "/var/lib/slskd/downloads";
+              default = null;
+            };
+          };
+
+          shares = {
+            directories = mkOption {
+              type = listOf str;
+              description = ''
+                Paths to shared directories. See
+                [documentation](https://github.com/slskd/slskd/blob/master/docs/config.md#directories)
+                for advanced usage.
+              '';
+              example = lib.literalExpression ''[ "/home/John/Music" "!/home/John/Music/Recordings" "[Music Drive]/mnt" ]'';
+            };
+            filters = mkOption {
+              type = listOf str;
+              example = lib.literalExpression ''[ "\.ini$" "Thumbs.db$" "\.DS_Store$" ]'';
+              description = "Regular expressions of files to exclude from sharing.";
+            };
+          };
+
+          rooms = mkOption {
+            type = listOf str;
+            description = "Chat rooms to join on startup.";
+          };
 
           soulseek = {
-            username = mkOption {
+            description = mkOption {
               type = str;
-              description = "Username on the Soulseek Network";
+              description = "The user description for the Soulseek network.";
+              defaultText = "A slskd user. https://github.com/slskd/slskd";
             };
             listen_port = mkOption {
               type = port;
-              description = "Port to use for communication on the Soulseek Network";
-              default = 50000;
+              description = "The port on which to listen for incoming connections.";
+              default = 50300;
             };
           };
 
+          global = {
+            # TODO speed units
+            upload = {
+              slots = mkOption {
+                type = ints.unsigned;
+                description = "Limit of the number of concurrent upload slots.";
+              };
+              speed_limit = mkOption {
+                type = ints.unsigned;
+                description = "Total upload speed limit.";
+              };
+            };
+            download = {
+              slots = mkOption {
+                type = ints.unsigned;
+                description = "Limit of the number of concurrent download slots.";
+              };
+              speed_limit = mkOption {
+                type = ints.unsigned;
+                description = "Total upload download limit";
+              };
+            };
+          };
+
+          filters.search.request = mkOption {
+            type = listOf str;
+            example = lib.literalExpression ''[ "^.{1,2}$" ]'';
+            description = "Incoming search requests which match this filter are ignored.";
+          };
+
           web = {
             port = mkOption {
               type = port;
-              default = 5001;
-              description = "The HTTP listen port";
+              default = 5030;
+              description = "The HTTP listen port.";
             };
             url_base = mkOption {
               type = path;
-              default = config.services.slskd.nginx.contextPath;
-              defaultText = "config.services.slskd.nginx.contextPath";
-              description = lib.mdDoc ''
-                The context path, i.e., the last part of the slskd URL
-              '';
+              default = "/";
+              description = "The base path in the url for web requests.";
+            };
+            # Users should use a reverse proxy instead for https
+            https.disabled = mkOption {
+              type = bool;
+              default = true;
+              description = "Disable the built-in HTTPS server";
             };
           };
 
-          shares = {
-            directories = mkOption {
-              type = listOf str;
-              description = lib.mdDoc ''
-                Paths to your shared directories. See
-                [documentation](https://github.com/slskd/slskd/blob/master/docs/config.md#directories)
-                for advanced usage
-              '';
+          retention = {
+            transfers = {
+              upload = {
+                succeeded = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of succeeded upload tasks.";
+                  defaultText = "(indefinite)";
+                };
+                errored = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of errored upload tasks.";
+                  defaultText = "(indefinite)";
+                };
+                cancelled = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of cancelled upload tasks.";
+                  defaultText = "(indefinite)";
+                };
+              };
+              download = {
+                succeeded = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of succeeded download tasks.";
+                  defaultText = "(indefinite)";
+                };
+                errored = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of errored download tasks.";
+                  defaultText = "(indefinite)";
+                };
+                cancelled = mkOption {
+                  type = ints.unsigned;
+                  description = "Lifespan of cancelled download tasks.";
+                  defaultText = "(indefinite)";
+                };
+              };
+            };
+            files = {
+              complete = mkOption {
+                type = ints.unsigned;
+                description = "Lifespan of completely downloaded files in minutes.";
+                example = 20160;
+                defaultText = "(indefinite)";
+              };
+              incomplete = mkOption {
+                type = ints.unsigned;
+                description = "Lifespan of incomplete downloading files in minutes.";
+                defaultText = "(indefinite)";
+              };
             };
           };
 
-          directories = {
-            incomplete = mkOption {
-              type = nullOr path;
-              description = "Directory where downloading files are stored";
-              defaultText = "<APP_DIR>/incomplete";
-              default = null;
-            };
-            downloads = mkOption {
-              type = nullOr path;
-              description = "Directory where downloaded files are stored";
-              defaultText = "<APP_DIR>/downloads";
-              default = null;
+          logger = {
+            # Disable by default, journald already retains as needed
+            disk = mkOption {
+              type = bool;
+              description = "Whether to log to the application directory.";
+              default = false;
+              visible = false;
             };
           };
         };
@@ -126,51 +254,42 @@ in {
   config = let
     cfg = config.services.slskd;
 
-    confWithoutNullValues = (lib.filterAttrs (key: value: value != null) cfg.settings);
+    confWithoutNullValues = (lib.filterAttrsRecursive (key: value: (builtins.tryEval value).success && value != null) cfg.settings);
 
     configurationYaml = settingsFormat.generate "slskd.yml" confWithoutNullValues;
 
   in lib.mkIf cfg.enable {
 
-    users = {
-      users.slskd = {
+    # Force off, configuration file is in nix store and is immutable
+    services.slskd.settings.remote_configuration = lib.mkForce false;
+
+    users.users = lib.optionalAttrs (cfg.user == defaultUser) {
+      "${defaultUser}" = {
+        group = cfg.group;
         isSystemUser = true;
-        group = "slskd";
       };
-      groups.slskd = {};
     };
 
-    # Reverse proxy configuration
-    services.nginx.enable = true;
-    services.nginx.virtualHosts."${cfg.nginx.domainName}" = {
-      forceSSL = true;
-      enableACME = true;
-      locations = {
-        "${cfg.nginx.contextPath}" = {
-          proxyPass = "http://localhost:${toString cfg.settings.web.port}";
-          proxyWebsockets = true;
-        };
-      };
+    users.groups = lib.optionalAttrs (cfg.group == defaultUser) {
+      "${defaultUser}" = {};
     };
 
-    # Hide state & logs
-    systemd.tmpfiles.rules = [
-      "d /var/lib/slskd/data 0750 slskd slskd - -"
-      "d /var/lib/slskd/logs 0750 slskd slskd - -"
-    ];
-
     systemd.services.slskd = {
       description = "A modern client-server application for the Soulseek file sharing network";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         Type = "simple";
-        User = "slskd";
+        User = cfg.user;
+        Group = cfg.group;
         EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
-        StateDirectory = "slskd";
+        StateDirectory = "slskd";  # Creates /var/lib/slskd and manages permissions
         ExecStart = "${cfg.package}/bin/slskd --app-dir /var/lib/slskd --config ${configurationYaml}";
         Restart = "on-failure";
         ReadOnlyPaths = map (d: builtins.elemAt (builtins.split "[^/]*(/.+)" d) 1) cfg.settings.shares.directories;
+        ReadWritePaths =
+          (lib.optional (cfg.settings.directories.incomplete != null) cfg.settings.directories.incomplete) ++
+          (lib.optional (cfg.settings.directories.downloads != null) cfg.settings.directories.downloads);
         LockPersonality = true;
         NoNewPrivileges = true;
         PrivateDevices = true;
@@ -194,18 +313,21 @@ in {
 
     networking.firewall.allowedTCPPorts = lib.optional cfg.openFirewall cfg.settings.soulseek.listen_port;
 
-    systemd.services.slskd-rotatelogs = lib.mkIf cfg.rotateLogs {
-      description = "Rotate slskd logs";
-      serviceConfig = {
-        Type = "oneshot";
-        User = "slskd";
-        ExecStart = [
-          "${pkgs.findutils}/bin/find /var/lib/slskd/logs/ -type f -mtime +10 -delete"
-          "${pkgs.findutils}/bin/find /var/lib/slskd/logs/ -type f -mtime +1  -exec ${pkgs.gzip}/bin/gzip -q {} ';'"
-        ];
-      };
-      startAt = "daily";
+    services.nginx = lib.mkIf (cfg.domain != null) {
+      enable = lib.mkDefault true;
+      virtualHosts."${cfg.domain}" = lib.mkMerge [
+        cfg.nginx
+        {
+          locations."${cfg.settings.web.url_base}" = {
+            proxyPass = "http://127.0.0.1:${toString cfg.settings.web.port}";
+            proxyWebsockets = true;
+          };
+        }
+      ];
     };
+  };
 
+  meta = {
+    maintainers = with lib.maintainers; [ ppom melvyn2 ];
   };
 }
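For reference, a minimal sketch of a configuration using the reworked slskd options above (`domain`, the nginx vhost submodule, `environmentFile`, `settings`); the hostname, secret path and share directory are placeholders:

```nix
{
  services.slskd = {
    enable = true;
    # Placeholder FQDN; setting it enables the nginx reverse proxy virtual host.
    domain = "slskd.example.com";
    nginx = {
      enableACME = true;
      forceSSL = true;
    };
    # Placeholder path; the file must define at least SLSKD_SLSK_USERNAME and SLSKD_SLSK_PASSWORD.
    environmentFile = "/run/secrets/slskd.env";
    # Opens only settings.soulseek.listen_port, not the web interface port.
    openFirewall = true;
    settings.shares.directories = [ "/srv/music" ];
  };
}
```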
diff --git a/nixpkgs/nixos/modules/services/web-apps/suwayomi-server.md b/nixpkgs/nixos/modules/services/web-apps/suwayomi-server.md
index 18e7a631443f..2185556a8721 100644
--- a/nixpkgs/nixos/modules/services/web-apps/suwayomi-server.md
+++ b/nixpkgs/nixos/modules/services/web-apps/suwayomi-server.md
@@ -100,7 +100,7 @@ Not all the configuration options are available directly in this module, but you
       server = {
         port = 4567;
         autoDownloadNewChapters = false;
-        maxSourcesInParallel" = 6;
+        maxSourcesInParallel = 6;
         extensionRepos = [
           "https://raw.githubusercontent.com/MY_ACCOUNT/MY_REPO/repo/index.min.json"
         ];
diff --git a/nixpkgs/nixos/modules/services/web-servers/garage.md b/nixpkgs/nixos/modules/services/web-servers/garage.md
index 3a9b85ce0603..fbefd1914d87 100644
--- a/nixpkgs/nixos/modules/services/web-servers/garage.md
+++ b/nixpkgs/nixos/modules/services/web-servers/garage.md
@@ -80,7 +80,7 @@ If major-releases will be abandoned by upstream, we should check first if those
 in NixOS for a safe upgrade-path before removing those. In that case we should keep those
 packages, but mark them as insecure in an expression like this (in
 `<nixpkgs/pkgs/tools/filesystem/garage/default.nix>`):
-```
+```nix
 /* ... */
 {
   garage_0_7_3 = generic {
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md
index aa36f66970ec..2b4bd06df04f 100644
--- a/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md
@@ -8,9 +8,11 @@ All of the core apps, optional apps, games, and core developer tools from GNOME
 
 To enable the GNOME desktop use:
 
-```
-services.xserver.desktopManager.gnome.enable = true;
-services.xserver.displayManager.gdm.enable = true;
+```nix
+{
+  services.xserver.desktopManager.gnome.enable = true;
+  services.xserver.displayManager.gdm.enable = true;
+}
 ```
 
 ::: {.note}
@@ -23,8 +25,10 @@ The default applications used in NixOS are very minimal, inspired by the default
 
 If you’d like to only use the GNOME desktop and not the apps, you can disable them with:
 
-```
-services.gnome.core-utilities.enable = false;
+```nix
+{
+  services.gnome.core-utilities.enable = false;
+}
 ```
 
 and none of them will be installed.
@@ -37,9 +41,11 @@ Note that this mechanism can only exclude core utilities, games and core develop
 
 It is also possible to disable many of the [core services](https://github.com/NixOS/nixpkgs/blob/b8ec4fd2a4edc4e30d02ba7b1a2cc1358f3db1d5/nixos/modules/services/x11/desktop-managers/gnome.nix#L329-L348). For example, if you do not need indexing files, you can disable Tracker with:
 
-```
-services.gnome.tracker-miners.enable = false;
-services.gnome.tracker.enable = false;
+```nix
+{
+  services.gnome.tracker-miners.enable = false;
+  services.gnome.tracker.enable = false;
+}
 ```
 
 Note, however, that doing so is not supported and might break some applications. Notably, GNOME Music cannot work without Tracker.
@@ -48,39 +54,47 @@ Note, however, that doing so is not supported and might break some applications.
 
 You can install all of the GNOME games with:
 
-```
-services.gnome.games.enable = true;
+```nix
+{
+  services.gnome.games.enable = true;
+}
 ```
 
 ### GNOME core developer tools {#sec-gnome-core-developer-tools}
 
 You can install GNOME core developer tools with:
 
-```
-services.gnome.core-developer-tools.enable = true;
+```nix
+{
+  services.gnome.core-developer-tools.enable = true;
+}
 ```
 
 ## Enabling GNOME Flashback {#sec-gnome-enable-flashback}
 
 GNOME Flashback provides a desktop environment based on the classic GNOME 2 architecture. You can enable the default GNOME Flashback session, which uses the Metacity window manager, with:
 
-```
-services.xserver.desktopManager.gnome.flashback.enableMetacity = true;
+```nix
+{
+  services.xserver.desktopManager.gnome.flashback.enableMetacity = true;
+}
 ```
 
 It is also possible to create custom sessions that replace Metacity with a different window manager using [](#opt-services.xserver.desktopManager.gnome.flashback.customSessions).
 
 The following example uses `xmonad` window manager:
 
-```
-services.xserver.desktopManager.gnome.flashback.customSessions = [
-  {
-    wmName = "xmonad";
-    wmLabel = "XMonad";
-    wmCommand = "${pkgs.haskellPackages.xmonad}/bin/xmonad";
-    enableGnomePanel = false;
-  }
-];
+```nix
+{
+  services.xserver.desktopManager.gnome.flashback.customSessions = [
+    {
+      wmName = "xmonad";
+      wmLabel = "XMonad";
+      wmCommand = "${pkgs.haskellPackages.xmonad}/bin/xmonad";
+      enableGnomePanel = false;
+    }
+  ];
+}
 ```
 
 ## Icons and GTK Themes {#sec-gnome-icons-and-gtk-themes}
@@ -104,12 +118,14 @@ Some packages that include Shell extensions, like `gnome.gpaste`, don’t have t
 
 You can install them like any other package:
 
-```
-environment.systemPackages = [
-  gnomeExtensions.dash-to-dock
-  gnomeExtensions.gsconnect
-  gnomeExtensions.mpris-indicator-button
-];
+```nix
+{
+  environment.systemPackages = [
+    gnomeExtensions.dash-to-dock
+    gnomeExtensions.gsconnect
+    gnomeExtensions.mpris-indicator-button
+  ];
+}
 ```
 
 Unfortunately, we lack a way for these to be managed in a completely declarative way.
@@ -136,23 +152,25 @@ You can use `dconf-editor` tool to explore which GSettings you can set.
 
 ### Example {#sec-gnome-gsettings-overrides-example}
 
-```
-services.xserver.desktopManager.gnome = {
-  extraGSettingsOverrides = ''
-    # Change default background
-    [org.gnome.desktop.background]
-    picture-uri='file://${pkgs.nixos-artwork.wallpapers.mosaic-blue.gnomeFilePath}'
-
-    # Favorite apps in gnome-shell
-    [org.gnome.shell]
-    favorite-apps=['org.gnome.Console.desktop', 'org.gnome.Nautilus.desktop']
-  '';
-
-  extraGSettingsOverridePackages = [
-    pkgs.gsettings-desktop-schemas # for org.gnome.desktop
-    pkgs.gnome.gnome-shell # for org.gnome.shell
-  ];
-};
+```nix
+{
+  services.xserver.desktopManager.gnome = {
+    extraGSettingsOverrides = ''
+      # Change default background
+      [org.gnome.desktop.background]
+      picture-uri='file://${pkgs.nixos-artwork.wallpapers.mosaic-blue.gnomeFilePath}'
+
+      # Favorite apps in gnome-shell
+      [org.gnome.shell]
+      favorite-apps=['org.gnome.Console.desktop', 'org.gnome.Nautilus.desktop']
+    '';
+
+    extraGSettingsOverridePackages = [
+      pkgs.gsettings-desktop-schemas # for org.gnome.desktop
+      pkgs.gnome.gnome-shell # for org.gnome.shell
+    ];
+  };
+}
 ```
 
 ## Frequently Asked Questions {#sec-gnome-faq}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md b/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md
index 1c14ede84749..ce251ec2d394 100644
--- a/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md
@@ -5,17 +5,23 @@ Pantheon is the desktop environment created for the elementary OS distribution.
 ## Enabling Pantheon {#sec-pantheon-enable}
 
 All of Pantheon is working in NixOS and the applications should be available, aside from a few [exceptions](https://github.com/NixOS/nixpkgs/issues/58161). To enable Pantheon, set
-```
-services.xserver.desktopManager.pantheon.enable = true;
+```nix
+{
+  services.xserver.desktopManager.pantheon.enable = true;
+}
 ```
 This automatically enables LightDM and Pantheon's LightDM greeter. If you'd like to disable this, set
-```
-services.xserver.displayManager.lightdm.greeters.pantheon.enable = false;
-services.xserver.displayManager.lightdm.enable = false;
+```nix
+{
+  services.xserver.displayManager.lightdm.greeters.pantheon.enable = false;
+  services.xserver.displayManager.lightdm.enable = false;
+}
 ```
 but please be aware that using Pantheon without LightDM as the display manager will break screen locking from the UI. The NixOS module for Pantheon installs all of Pantheon's default applications. If you'd prefer not to install Pantheon's apps, set
-```
-services.pantheon.apps.enable = false;
+```nix
+{
+  services.pantheon.apps.enable = false;
+}
 ```
 You can also use [](#opt-environment.pantheon.excludePackages) to remove any other app (like `elementary-mail`).
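+
+For illustration, excluding a single application could look like the sketch below (assuming `pkgs.pantheon.elementary-mail` is the package attribute you want to drop):
+```nix
+{
+  environment.pantheon.excludePackages = [ pkgs.pantheon.elementary-mail ];
+}
+```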
 
@@ -29,30 +35,33 @@ Wingpanel and Switchboard work differently than they do in other distributions,
 to configure the programs with plugs or indicators.
 
 The difference in NixOS is that both of these programs are patched to load plugins from a directory given by an environment variable, all of which is controlled in Nix. If you need to configure the particular packages manually, you can override the packages like:
-```
+```nix
 wingpanel-with-indicators.override {
   indicators = [
     pkgs.some-special-indicator
   ];
-};
+}
 
+```
+```nix
 switchboard-with-plugs.override {
   plugs = [
     pkgs.some-special-plug
   ];
-};
+}
 ```
 Please note that, just as the NixOS options describe these as extra plugins, this only adds to the default plugins included with the programs. If for some reason you'd like to configure exactly which plugins are used, both packages have an argument for this:
-```
+```nix
 wingpanel-with-indicators.override {
   useDefaultIndicators = false;
   indicators = specialListOfIndicators;
-};
-
+}
+```
+```nix
 switchboard-with-plugs.override {
   useDefaultPlugs = false;
   plugs = specialListOfPlugs;
-};
+}
 ```
 This could be most useful for testing a particular plug-in in isolation.
 
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix b/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix
index 0861530f21e8..3e7c6b01b3e9 100644
--- a/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix
@@ -251,7 +251,6 @@ in
 
     environment.systemPackages = [pkgs.xpra];
 
-    virtualisation.virtualbox.guest.x11 = false;
     hardware.pulseaudio.enable = mkDefault cfg.pulseaudio;
     hardware.pulseaudio.systemWide = mkDefault cfg.pulseaudio;
   };
diff --git a/nixpkgs/nixos/modules/services/x11/xserver.nix b/nixpkgs/nixos/modules/services/x11/xserver.nix
index 4e0235f9ad1d..453f414e2a86 100644
--- a/nixpkgs/nixos/modules/services/x11/xserver.nix
+++ b/nixpkgs/nixos/modules/services/x11/xserver.nix
@@ -303,7 +303,7 @@ in
         type = types.listOf types.str;
         default = [ "modesetting" "fbdev" ];
         example = [
-          "nvidia" "nvidiaLegacy390" "nvidiaLegacy340" "nvidiaLegacy304"
+          "nvidia"
           "amdgpu-pro"
         ];
         # TODO(@oxij): think how to easily add the rest, like those nvidia things
diff --git a/nixpkgs/nixos/modules/system/boot/clevis.md b/nixpkgs/nixos/modules/system/boot/clevis.md
index dcbf55de60a8..39edc0fc38df 100644
--- a/nixpkgs/nixos/modules/system/boot/clevis.md
+++ b/nixpkgs/nixos/modules/system/boot/clevis.md
@@ -39,13 +39,17 @@ For more complete documentation on how to generate a secret with clevis, see the
 
 In order to activate unattended decryption of a resource at boot, enable the `clevis` module:
 
-```
-boot.initrd.clevis.enable = true;
+```nix
+{
+  boot.initrd.clevis.enable = true;
+}
 ```
 
 Then, specify the device you want to decrypt using a given clevis secret. Clevis will automatically try to decrypt the device at boot and will fallback to interactive unlocking if the decryption policy is not fulfilled.
-```
-boot.initrd.clevis.devices."/dev/nvme0n1p1".secretFile = ./nvme0n1p1.jwe;
+```nix
+{
+  boot.initrd.clevis.devices."/dev/nvme0n1p1".secretFile = ./nvme0n1p1.jwe;
+}
 ```
 
 Only `bcachefs`, `zfs` and `luks` encrypted devices are supported at this time.
diff --git a/nixpkgs/nixos/modules/system/boot/initrd-ssh.nix b/nixpkgs/nixos/modules/system/boot/initrd-ssh.nix
index 61e61f32bc5e..43da2496d16c 100644
--- a/nixpkgs/nixos/modules/system/boot/initrd-ssh.nix
+++ b/nixpkgs/nixos/modules/system/boot/initrd-ssh.nix
@@ -93,6 +93,21 @@ in
       defaultText = literalExpression "config.users.users.root.openssh.authorizedKeys.keys";
       description = lib.mdDoc ''
         Authorized keys for the root user on initrd.
+        You can combine the `authorizedKeys` and `authorizedKeyFiles` options.
+      '';
+      example = [
+        "ssh-rsa AAAAB3NzaC1yc2etc/etc/etcjwrsh8e596z6J0l7 example@host"
+        "ssh-ed25519 AAAAC3NzaCetcetera/etceteraJZMfk3QPfQ foo@bar"
+      ];
+    };
+
+    authorizedKeyFiles = mkOption {
+      type = types.listOf types.path;
+      default = config.users.users.root.openssh.authorizedKeys.keyFiles;
+      defaultText = literalExpression "config.users.users.root.openssh.authorizedKeys.keyFiles";
+      description = lib.mdDoc ''
+        Authorized keys taken from files for the root user on initrd.
+        You can combine the `authorizedKeyFiles` and `authorizedKeys` options.
       '';
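+      # Illustrative example only; the relative path below is hypothetical.
+      example = literalExpression "[ ./initrd-root-key.pub ]";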
     };
 
@@ -152,7 +167,7 @@ in
   in mkIf enabled {
     assertions = [
       {
-        assertion = cfg.authorizedKeys != [];
+        assertion = cfg.authorizedKeys != [] || cfg.authorizedKeyFiles != [];
         message = "You should specify at least one authorized key for initrd SSH";
       }
 
@@ -206,6 +221,9 @@ in
       ${concatStrings (map (key: ''
         echo ${escapeShellArg key} >> /root/.ssh/authorized_keys
       '') cfg.authorizedKeys)}
+      ${concatStrings (map (keyFile: ''
+        cat ${keyFile} >> /root/.ssh/authorized_keys
+      '') cfg.authorizedKeyFiles)}
 
       ${flip concatMapStrings cfg.hostKeys (path: ''
         # keys from Nix store are world-readable, which sshd doesn't like
@@ -236,9 +254,13 @@ in
 
       users.root.shell = mkIf (config.boot.initrd.network.ssh.shell != null) config.boot.initrd.network.ssh.shell;
 
-      contents."/etc/ssh/authorized_keys.d/root".text =
-        concatStringsSep "\n" config.boot.initrd.network.ssh.authorizedKeys;
-      contents."/etc/ssh/sshd_config".text = sshdConfig;
+      contents = {
+        "/etc/ssh/sshd_config".text = sshdConfig;
+        "/etc/ssh/authorized_keys.d/root".text =
+          concatStringsSep "\n" (
+            config.boot.initrd.network.ssh.authorizedKeys ++
+            (map (file: lib.fileContents file) config.boot.initrd.network.ssh.authorizedKeyFiles));
+      };
       storePaths = ["${package}/bin/sshd"];
 
       services.sshd = {
diff --git a/nixpkgs/nixos/modules/system/boot/plymouth.nix b/nixpkgs/nixos/modules/system/boot/plymouth.nix
index 16bca40993ae..85f0fd4622df 100644
--- a/nixpkgs/nixos/modules/system/boot/plymouth.nix
+++ b/nixpkgs/nixos/modules/system/boot/plymouth.nix
@@ -4,7 +4,6 @@ with lib;
 
 let
 
-  inherit (pkgs) nixos-icons;
   plymouth = pkgs.plymouth.override {
     systemd = config.boot.initrd.systemd.package;
   };
@@ -97,8 +96,8 @@ in
       logo = mkOption {
         type = types.path;
         # Dimensions are 48x48 to match GDM logo
-        default = "${nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png";
-        defaultText = literalExpression ''"''${nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png"'';
+        default = "${pkgs.nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png";
+        defaultText = literalExpression ''"''${pkgs.nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png"'';
         example = literalExpression ''
           pkgs.fetchurl {
             url = "https://nixos.org/logo/nixos-hires.png";
@@ -107,6 +106,7 @@ in
         '';
         description = lib.mdDoc ''
           Logo which is displayed on the splash screen.
+          Currently supports PNG file format only.
         '';
       };
 
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix b/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix
index e4f61db0cd02..06359f273846 100644
--- a/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix
+++ b/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix
@@ -392,7 +392,10 @@ in {
 
     boot.kernelParams = [
       "root=${config.boot.initrd.systemd.root}"
-    ] ++ lib.optional (config.boot.resumeDevice != "") "resume=${config.boot.resumeDevice}";
+    ] ++ lib.optional (config.boot.resumeDevice != "") "resume=${config.boot.resumeDevice}"
+      # `systemd` mounts root in initrd as read-only unless "rw" is on the kernel command line.
+      # For NixOS activation to succeed, we need to have root writable in initrd.
+      ++ lib.optional (config.boot.initrd.systemd.root == "gpt-auto") "rw";
 
     boot.initrd.systemd = {
       initrdBin = [pkgs.bash pkgs.coreutils cfg.package.kmod cfg.package];
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/envfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/envfs.nix
index 365cb46ff2fe..6719a03610d1 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/envfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/envfs.nix
@@ -7,6 +7,7 @@ let
       device = "none";
       fsType = "envfs";
       options = [
+        "bind-mount=/bin"
         "fallback-path=${pkgs.runCommand "fallback-path" {} (''
           mkdir -p $out
           ln -s ${config.environment.usrbinenv} $out/env
@@ -15,6 +16,9 @@ let
         "nofail"
       ];
     };
+    # We need to bind-mount /bin onto /usr/bin because otherwise, when upgrading
+    # from envfs < 1.0.5, the old envfs would remain mounted without the /bin bind mount.
+    # Systemd is smart enough not to mount /bin if it is already mounted.
     "/bin" = {
       device = "/usr/bin";
       fsType = "none";
diff --git a/nixpkgs/nixos/modules/virtualisation/incus.nix b/nixpkgs/nixos/modules/virtualisation/incus.nix
index da7873c7bec8..1ceaa40cca9d 100644
--- a/nixpkgs/nixos/modules/virtualisation/incus.nix
+++ b/nixpkgs/nixos/modules/virtualisation/incus.nix
@@ -1,8 +1,80 @@
-{ config, lib, pkgs, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
 
 let
   cfg = config.virtualisation.incus;
   preseedFormat = pkgs.formats.yaml { };
+
+  serverBinPath = ''${pkgs.qemu_kvm}/libexec:${
+    lib.makeBinPath (
+      with pkgs;
+      [
+        cfg.package
+
+        acl
+        attr
+        bash
+        btrfs-progs
+        cdrkit
+        coreutils
+        criu
+        dnsmasq
+        e2fsprogs
+        findutils
+        getent
+        gnugrep
+        gnused
+        gnutar
+        gptfdisk
+        gzip
+        iproute2
+        iptables
+        kmod
+        lvm2
+        minio
+        nftables
+        qemu_kvm
+        qemu-utils
+        rsync
+        squashfsTools
+        systemd
+        thin-provisioning-tools
+        util-linux
+        virtiofsd
+        xz
+
+        (writeShellScriptBin "apparmor_parser" ''
+          exec '${apparmor-parser}/bin/apparmor_parser' -I '${apparmor-profiles}/etc/apparmor.d' "$@"
+        '')
+      ]
+      ++ lib.optionals config.boot.zfs.enabled [
+        config.boot.zfs.package
+        "${config.boot.zfs.package}/lib/udev"
+      ]
+      ++ lib.optionals config.virtualisation.vswitch.enable [ config.virtualisation.vswitch.package ]
+    )
+  }'';
+
+  # https://github.com/lxc/incus/blob/cff35a29ee3d7a2af1f937cbb6cf23776941854b/internal/server/instance/drivers/driver_qemu.go#L123
+  ovmf-prefix = if pkgs.stdenv.hostPlatform.isAarch64 then "AAVMF" else "OVMF";
+  ovmf = pkgs.linkFarm "incus-ovmf" [
+    {
+      name = "OVMF_CODE.4MB.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_CODE.fd";
+    }
+    {
+      name = "OVMF_VARS.4MB.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_VARS.fd";
+    }
+    {
+      name = "OVMF_VARS.4MB.ms.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_VARS.fd";
+    }
+  ];
 in
 {
   meta = {
@@ -11,26 +83,29 @@ in
 
   options = {
     virtualisation.incus = {
-      enable = lib.mkEnableOption (lib.mdDoc ''
+      enable = lib.mkEnableOption ''
         incusd, a daemon that manages containers and virtual machines.
 
         Users in the "incus-admin" group can interact with
         the daemon (e.g. to start or stop containers) using the
         {command}`incus` command line tool, among others.
-      '');
+      '';
 
       package = lib.mkPackageOption pkgs "incus" { };
 
       lxcPackage = lib.mkPackageOption pkgs "lxc" { };
 
+      clientPackage = lib.mkPackageOption pkgs [
+        "incus"
+        "client"
+      ] { };
+
       preseed = lib.mkOption {
-        type = lib.types.nullOr (
-          lib.types.submodule { freeformType = preseedFormat.type; }
-        );
+        type = lib.types.nullOr (lib.types.submodule { freeformType = preseedFormat.type; });
 
         default = null;
 
-        description = lib.mdDoc ''
+        description = ''
           Configuration for Incus preseed, see
           <https://linuxcontainers.org/incus/docs/main/howto/initialize/#non-interactive-configuration>
           for supported values.
@@ -80,18 +155,16 @@ in
         };
       };
 
-      socketActivation = lib.mkEnableOption (
-        lib.mdDoc ''
-          socket-activation for starting incus.service. Enabling this option
-          will stop incus.service from starting automatically on boot.
-        ''
-      );
+      socketActivation = lib.mkEnableOption (''
+        socket-activation for starting incus.service. Enabling this option
+        will stop incus.service from starting automatically on boot.
+      '');
 
       startTimeout = lib.mkOption {
         type = lib.types.ints.unsigned;
         default = 600;
         apply = toString;
-        description = lib.mdDoc ''
+        description = ''
           Time to wait (in seconds) for incusd to become ready to process requests.
           If incusd does not reply within the configured time, `incus.service` will be
           considered failed and systemd will attempt to restart it.
@@ -99,9 +172,12 @@ in
       };
 
       ui = {
-        enable = lib.mkEnableOption (lib.mdDoc "(experimental) Incus UI");
+        enable = lib.mkEnableOption "(experimental) Incus UI";
 
-        package = lib.mkPackageOption pkgs [ "incus" "ui" ] { };
+        package = lib.mkPackageOption pkgs [
+          "incus"
+          "ui"
+        ] { };
       };
     };
   };
@@ -109,7 +185,12 @@ in
   config = lib.mkIf cfg.enable {
     assertions = [
       {
-        assertion = !(config.networking.firewall.enable && !config.networking.nftables.enable && config.virtualisation.incus.enable);
+        assertion =
+          !(
+            config.networking.firewall.enable
+            && !config.networking.nftables.enable
+            && config.virtualisation.incus.enable
+          );
         message = "Incus on NixOS is unsupported using iptables. Set `networking.nftables.enable = true;`";
       }
     ];
@@ -137,7 +218,12 @@ in
       "vhost_vsock"
     ] ++ lib.optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
 
-    environment.systemPackages = [ cfg.package ];
+    environment.systemPackages = [
+      cfg.clientPackage
+
+      # gui console support
+      pkgs.spice-gtk
+    ];
 
     # Note: the following options are also declared in virtualisation.lxc, but
     # the latter can't be simply enabled to reuse the formers, because it
@@ -164,31 +250,24 @@ in
         "network-online.target"
         "lxcfs.service"
         "incus.socket"
-      ]
-        ++ lib.optional config.virtualisation.vswitch.enable "ovs-vswitchd.service";
+      ] ++ lib.optionals config.virtualisation.vswitch.enable [ "ovs-vswitchd.service" ];
 
       requires = [
         "lxcfs.service"
         "incus.socket"
-      ]
-        ++ lib.optional config.virtualisation.vswitch.enable "ovs-vswitchd.service";
-
-      wants = [
-        "network-online.target"
-      ];
+      ] ++ lib.optionals config.virtualisation.vswitch.enable [ "ovs-vswitchd.service" ];
 
-      path = lib.optionals config.boot.zfs.enabled [
-        config.boot.zfs.package
-        "${config.boot.zfs.package}/lib/udev"
-      ]
-        ++ lib.optional config.virtualisation.vswitch.enable config.virtualisation.vswitch.package;
+      wants = [ "network-online.target" ];
 
-      environment = lib.mkMerge [ {
-        # Override Path to the LXC template configuration directory
-        INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
-      } (lib.mkIf (cfg.ui.enable) {
-        "INCUS_UI" = cfg.ui.package;
-      }) ];
+      environment = lib.mkMerge [
+        {
+          INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
+          INCUS_OVMF_PATH = ovmf;
+          INCUS_USBIDS_PATH = "${pkgs.hwdata}/share/hwdata/usb.ids";
+          PATH = lib.mkForce serverBinPath;
+        }
+        (lib.mkIf (cfg.ui.enable) { "INCUS_UI" = cfg.ui.package; })
+      ];
 
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/incusd --group incus-admin";
@@ -222,15 +301,13 @@ in
     systemd.services.incus-preseed = lib.mkIf (cfg.preseed != null) {
       description = "Incus initialization with preseed file";
 
-      wantedBy = ["incus.service"];
-      after = ["incus.service"];
-      bindsTo = ["incus.service"];
-      partOf = ["incus.service"];
+      wantedBy = [ "incus.service" ];
+      after = [ "incus.service" ];
+      bindsTo = [ "incus.service" ];
+      partOf = [ "incus.service" ];
 
       script = ''
-        ${cfg.package}/bin/incus admin init --preseed <${
-          preseedFormat.generate "incus-preseed.yaml" cfg.preseed
-        }
+        ${cfg.package}/bin/incus admin init --preseed <${preseedFormat.generate "incus-preseed.yaml" cfg.preseed}
       '';
 
       serviceConfig = {
diff --git a/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix b/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
index 94f70c65436c..c2606968d3be 100644
--- a/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
+++ b/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
@@ -5,14 +5,32 @@
 with lib;
 
 let
-
   cfg = config.virtualisation.virtualbox.guest;
   kernel = config.boot.kernelPackages;
 
-in
+  mkVirtualBoxUserService = serviceArgs: {
+    description = "VirtualBox Guest User Services ${serviceArgs}";
 
-{
+    wantedBy = [ "graphical-session.target" ];
+    partOf = [ "graphical-session.target" ];
+
+    # The graphical session may not be ready when starting the service
+    # Hence, check if the DISPLAY env var is set, otherwise fail, wait and retry again
+    startLimitBurst = 20;
+
+    unitConfig.ConditionVirtualization = "oracle";
 
+    # Check if the display environment is ready, otherwise fail
+    preStart = "${pkgs.bash}/bin/bash -c \"if [ -z $DISPLAY ]; then exit 1; fi\"";
+    serviceConfig = {
+      ExecStart = "@${kernel.virtualboxGuestAdditions}/bin/VBoxClient --foreground ${serviceArgs}";
+      # Wait after a failure, hoping that the display environment is ready after waiting
+      RestartSec = 2;
+      Restart = "always";
+    };
+  };
+in
+{
   ###### interface
 
   options.virtualisation.virtualbox.guest = {
@@ -22,32 +40,45 @@ in
       description = lib.mdDoc "Whether to enable the VirtualBox service and other guest additions.";
     };
 
-    x11 = mkOption {
+    clipboard = mkOption {
       default = true;
       type = types.bool;
-      description = lib.mdDoc "Whether to enable x11 graphics";
+      description = lib.mdDoc "Whether to enable clipboard support.";
+    };
+
+    seamless = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc "Whether to enable seamless mode. When activated windows from the guest appear next to the windows of the host.";
+    };
+
+    draganddrop = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc "Whether to enable drag and drop support.";
     };
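+
+    # Illustrative usage sketch for these options (values are hypothetical):
+    #   virtualisation.virtualbox.guest = {
+    #     enable = true;
+    #     draganddrop = false; # keep clipboard and seamless mode, disable drag and drop
+    #   };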
   };
 
   ###### implementation
 
-  config = mkIf cfg.enable (mkMerge [{
-    assertions = [{
-      assertion = pkgs.stdenv.hostPlatform.isx86;
-      message = "Virtualbox not currently supported on ${pkgs.stdenv.hostPlatform.system}";
-    }];
+  config = mkIf cfg.enable (mkMerge [
+    {
+      assertions = [{
+        assertion = pkgs.stdenv.hostPlatform.isx86;
+        message = "Virtualbox not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+      }];
 
-    environment.systemPackages = [ kernel.virtualboxGuestAdditions ];
+      environment.systemPackages = [ kernel.virtualboxGuestAdditions ];
 
-    boot.extraModulePackages = [ kernel.virtualboxGuestAdditions ];
+      boot.extraModulePackages = [ kernel.virtualboxGuestAdditions ];
 
-    boot.supportedFilesystems = [ "vboxsf" ];
-    boot.initrd.supportedFilesystems = [ "vboxsf" ];
+      boot.supportedFilesystems = [ "vboxsf" ];
+      boot.initrd.supportedFilesystems = [ "vboxsf" ];
 
-    users.groups.vboxsf.gid = config.ids.gids.vboxsf;
+      users.groups.vboxsf.gid = config.ids.gids.vboxsf;
 
-    systemd.services.virtualbox =
-      { description = "VirtualBox Guest Services";
+      systemd.services.virtualbox = {
+        description = "VirtualBox Guest Services";
 
         wantedBy = [ "multi-user.target" ];
         requires = [ "dev-vboxguest.device" ];
@@ -58,36 +89,27 @@ in
         serviceConfig.ExecStart = "@${kernel.virtualboxGuestAdditions}/bin/VBoxService VBoxService --foreground";
       };
 
-    services.udev.extraRules =
-      ''
-        # /dev/vboxuser is necessary for VBoxClient to work.  Maybe we
-        # should restrict this to logged-in users.
-        KERNEL=="vboxuser",  OWNER="root", GROUP="root", MODE="0666"
-
-        # Allow systemd dependencies on vboxguest.
-        SUBSYSTEM=="misc", KERNEL=="vboxguest", TAG+="systemd"
-      '';
-  } (mkIf cfg.x11 {
-    services.xserver.videoDrivers = [ "vmware" "virtualbox" "modesetting" ];
-
-    services.xserver.config =
-      ''
-        Section "InputDevice"
-          Identifier "VBoxMouse"
-          Driver "vboxmouse"
-        EndSection
-      '';
-
-    services.xserver.serverLayoutSection =
-      ''
-        InputDevice "VBoxMouse"
-      '';
-
-    services.xserver.displayManager.sessionCommands =
-      ''
-        PATH=${makeBinPath [ pkgs.gnugrep pkgs.which pkgs.xorg.xorgserver.out ]}:$PATH \
-          ${kernel.virtualboxGuestAdditions}/bin/VBoxClient-all
-      '';
-  })]);
-
+      services.udev.extraRules =
+        ''
+          # /dev/vboxuser is necessary for VBoxClient to work.  Maybe we
+          # should restrict this to logged-in users.
+          KERNEL=="vboxuser",  OWNER="root", GROUP="root", MODE="0666"
+
+          # Allow systemd dependencies on vboxguest.
+          SUBSYSTEM=="misc", KERNEL=="vboxguest", TAG+="systemd"
+        '';
+
+      systemd.user.services.virtualboxClientVmsvga = mkVirtualBoxUserService "--vmsvga-session";
+    }
+    (
+      mkIf cfg.clipboard {
+        systemd.user.services.virtualboxClientClipboard = mkVirtualBoxUserService "--clipboard";
+      }
+    )
+    (
+      mkIf cfg.seamless {
+        systemd.user.services.virtualboxClientSeamless = mkVirtualBoxUserService "--seamless";
+      }
+    )
+  ]);
 }
diff --git a/nixpkgs/nixos/release-combined.nix b/nixpkgs/nixos/release-combined.nix
index 459700514ffc..96b24feeb063 100644
--- a/nixpkgs/nixos/release-combined.nix
+++ b/nixpkgs/nixos/release-combined.nix
@@ -169,11 +169,6 @@ in rec {
         (onFullSupported "nixpkgs.jdk")
         (onSystems ["x86_64-linux"] "nixpkgs.mesa_i686") # i686 sanity check + useful
         ["nixpkgs.tarball"]
-
-        # Ensure that nixpkgs-check-by-name is available in nixos-unstable,
-        # so that a pre-built version can be used in CI for PR's
-        # See ../pkgs/test/nixpkgs-check-by-name/README.md
-        (onSystems ["x86_64-linux"] "nixpkgs.tests.nixpkgs-check-by-name")
       ];
     };
 }
diff --git a/nixpkgs/nixos/tests/all-tests.nix b/nixpkgs/nixos/tests/all-tests.nix
index 2c08fdba6c98..f7ad6c16f587 100644
--- a/nixpkgs/nixos/tests/all-tests.nix
+++ b/nixpkgs/nixos/tests/all-tests.nix
@@ -309,6 +309,7 @@ in {
   firefox-devedition = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-devedition; };
   firefox-esr    = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr; }; # used in `tested` job
   firefox-esr-115 = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr-115; };
+  firefoxpwa = handleTest ./firefoxpwa.nix {};
   firejail = handleTest ./firejail.nix {};
   firewall = handleTest ./firewall.nix { nftables = false; };
   firewall-nftables = handleTest ./firewall.nix { nftables = true; };
@@ -542,7 +543,9 @@ in {
   mobilizon = handleTest ./mobilizon.nix {};
   mod_perl = handleTest ./mod_perl.nix {};
   molly-brown = handleTest ./molly-brown.nix {};
+  mollysocket = handleTest ./mollysocket.nix { };
   monado = handleTest ./monado.nix {};
+  monetdb = handleTest ./monetdb.nix {};
   monica = handleTest ./web-apps/monica.nix {};
   mongodb = handleTest ./mongodb.nix {};
   moodle = handleTest ./moodle.nix {};
@@ -695,6 +698,7 @@ in {
   pgmanage = handleTest ./pgmanage.nix {};
   pgvecto-rs = handleTest ./pgvecto-rs.nix {};
   phosh = handleTest ./phosh.nix {};
+  photonvision = handleTest ./photonvision.nix {};
   photoprism = handleTest ./photoprism.nix {};
   php = handleTest ./php {};
   php81 = handleTest ./php { php = pkgs.php81; };
@@ -788,6 +792,7 @@ in {
   sanoid = handleTest ./sanoid.nix {};
   scaphandre = handleTest ./scaphandre.nix {};
   schleuder = handleTest ./schleuder.nix {};
+  scion-freestanding-deployment = handleTest ./scion/freestanding-deployment {};
   scrutiny = handleTest ./scrutiny.nix {};
   sddm = handleTest ./sddm.nix {};
   seafile = handleTest ./seafile.nix {};
@@ -899,6 +904,7 @@ in {
   systemd-sysusers-immutable = runTest ./systemd-sysusers-immutable.nix;
   systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
   systemd-timesyncd-nscd-dnssec = handleTest ./systemd-timesyncd-nscd-dnssec.nix {};
+  systemd-user-linger = handleTest ./systemd-user-linger.nix {};
   systemd-user-tmpfiles-rules = handleTest ./systemd-user-tmpfiles-rules.nix {};
   systemd-misc = handleTest ./systemd-misc.nix {};
   systemd-userdbd = handleTest ./systemd-userdbd.nix {};
@@ -959,6 +965,7 @@ in {
   user-activation-scripts = handleTest ./user-activation-scripts.nix {};
   user-expiry = runTest ./user-expiry.nix;
   user-home-mode = handleTest ./user-home-mode.nix {};
+  ustreamer = handleTest ./ustreamer.nix {};
   uwsgi = handleTest ./uwsgi.nix {};
   v2ray = handleTest ./v2ray.nix {};
   varnish60 = handleTest ./varnish.nix { package = pkgs.varnish60; };
diff --git a/nixpkgs/nixos/tests/armagetronad.nix b/nixpkgs/nixos/tests/armagetronad.nix
index ff2841dedd21..d59827354b77 100644
--- a/nixpkgs/nixos/tests/armagetronad.nix
+++ b/nixpkgs/nixos/tests/armagetronad.nix
@@ -1,4 +1,9 @@
-import ./make-test-python.nix ({ pkgs, ...} :
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
 
 let
   user = "alice";
@@ -16,7 +21,8 @@ let
       test-support.displayManager.auto.user = user;
     };
 
-in {
+in
+makeTest {
   name = "armagetronad";
   meta = with pkgs.lib.maintainers; {
     maintainers = [ numinit ];
@@ -269,4 +275,4 @@ in {
         srv.node.wait_until_fails(f"ss --numeric --udp --listening | grep -q {srv.port}")
     '';
 
-})
+}
diff --git a/nixpkgs/nixos/tests/firefoxpwa.nix b/nixpkgs/nixos/tests/firefoxpwa.nix
new file mode 100644
index 000000000000..374d67b01ac6
--- /dev/null
+++ b/nixpkgs/nixos/tests/firefoxpwa.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "firefoxpwa";
+  meta.maintainers = with lib.maintainers; [ camillemndn ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      imports = [ ./common/x11.nix ];
+      environment.systemPackages = with pkgs; [ firefoxpwa jq ];
+
+      programs.firefox = {
+        enable = true;
+        nativeMessagingHosts.packages = [ pkgs.firefoxpwa ];
+      };
+
+      services.jellyfin.enable = true;
+    };
+
+  enableOCR = true;
+
+  testScript = ''
+    machine.start()
+
+    with subtest("Install a progressive web app"):
+        machine.wait_for_unit("jellyfin.service")
+        machine.wait_for_open_port(8096)
+        machine.succeed("firefoxpwa site install http://localhost:8096/web/manifest.json >&2")
+
+    with subtest("Launch the progressive web app"):
+        machine.succeed("firefoxpwa site launch $(jq -r < ~/.local/share/firefoxpwa/config.json '.sites | keys[0]') >&2")
+        machine.wait_for_window("Jellyfin")
+        machine.wait_for_text("Jellyfin")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/goss.nix b/nixpkgs/nixos/tests/goss.nix
index 6b772d19215e..2e77b2734464 100644
--- a/nixpkgs/nixos/tests/goss.nix
+++ b/nixpkgs/nixos/tests/goss.nix
@@ -28,10 +28,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         };
         group.root.exists = true;
         kernel-param."kernel.ostype".value = "Linux";
-        service.goss = {
-          enabled = true;
-          running = true;
-        };
         user.root.exists = true;
       };
     };
@@ -46,8 +42,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     with subtest("returns health status"):
       result = json.loads(machine.succeed("curl -sS http://localhost:8080/healthz"))
 
-      assert len(result["results"]) == 10, f".results should be an array of 10 items, was {result['results']!r}"
+      assert len(result["results"]) == 8, f".results should be an array of 10 items, was {result['results']!r}"
       assert result["summary"]["failed-count"] == 0, f".summary.failed-count should be zero, was {result['summary']['failed-count']}"
-      assert result["summary"]["test-count"] == 10, f".summary.test-count should be 10, was {result['summary']['test-count']}"
+      assert result["summary"]["test-count"] == 8, f".summary.test-count should be 10, was {result['summary']['test-count']}"
     '';
 })
diff --git a/nixpkgs/nixos/tests/incus/container.nix b/nixpkgs/nixos/tests/incus/container.nix
index 9260f70da98c..a71c5355046a 100644
--- a/nixpkgs/nixos/tests/incus/container.nix
+++ b/nixpkgs/nixos/tests/incus/container.nix
@@ -1,20 +1,21 @@
-import ../make-test-python.nix ({ pkgs, lib, extra ? {}, ... } :
+import ../make-test-python.nix ({ pkgs, lib, extra ? {}, name ? "incus-container", ... } :
 
 let
   releases = import ../../release.nix {
-    configuration = {
-      # Building documentation makes the test unnecessarily take a longer time:
-      documentation.enable = lib.mkForce false;
+    configuration = lib.recursiveUpdate {
+        # Building documentation makes the test take unnecessarily long:
+        documentation.enable = lib.mkForce false;
 
-      boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
-    } // extra;
+        boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
+    }
+    extra;
   };
 
   container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
   container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
 in
 {
-  name = "incus-container";
+  inherit name;
 
   meta = {
     maintainers = lib.teams.lxc.members;
diff --git a/nixpkgs/nixos/tests/incus/default.nix b/nixpkgs/nixos/tests/incus/default.nix
index 32bc5396a164..b850c4fba018 100644
--- a/nixpkgs/nixos/tests/incus/default.nix
+++ b/nixpkgs/nixos/tests/incus/default.nix
@@ -5,16 +5,22 @@
   handleTestOn,
 }:
 {
-  container-old-init = import ./container.nix { inherit system pkgs; };
-  container-new-init = import ./container.nix { inherit system pkgs; extra = {
-    # Enable new systemd init
-    boot.initrd.systemd.enable = true;
-  }; };
+  container-legacy-init = import ./container.nix {
+    name = "container-legacy-init";
+    inherit system pkgs;
+  };
+  container-systemd-init = import ./container.nix {
+    name = "container-systemd-init";
+    inherit system pkgs;
+    extra = {
+      boot.initrd.systemd.enable = true;
+    };
+  };
   lxd-to-incus = import ./lxd-to-incus.nix { inherit system pkgs; };
   openvswitch = import ./openvswitch.nix { inherit system pkgs; };
   preseed = import ./preseed.nix { inherit system pkgs; };
   socket-activated = import ./socket-activated.nix { inherit system pkgs; };
   storage = import ./storage.nix { inherit system pkgs; };
-  ui = import ./ui.nix {inherit system pkgs;};
+  ui = import ./ui.nix { inherit system pkgs; };
   virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix { inherit system pkgs; };
 }
diff --git a/nixpkgs/nixos/tests/incus/lxd-to-incus.nix b/nixpkgs/nixos/tests/incus/lxd-to-incus.nix
index 262f63c0f26f..e93b76591eca 100644
--- a/nixpkgs/nixos/tests/incus/lxd-to-incus.nix
+++ b/nixpkgs/nixos/tests/incus/lxd-to-incus.nix
@@ -95,7 +95,7 @@ import ../make-test-python.nix (
       machine.wait_for_unit("incus.service")
 
       with machine.nested("run migration"):
-          machine.succeed("lxd-to-incus --yes")
+          machine.succeed("${pkgs.incus}/bin/lxd-to-incus --yes")
 
       with machine.nested("verify resources migrated to incus"):
           machine.succeed("incus config show container")
diff --git a/nixpkgs/nixos/tests/k3s/multi-node.nix b/nixpkgs/nixos/tests/k3s/multi-node.nix
index 932b4639b39c..20279f3ca4b9 100644
--- a/nixpkgs/nixos/tests/k3s/multi-node.nix
+++ b/nixpkgs/nixos/tests/k3s/multi-node.nix
@@ -128,9 +128,7 @@ import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
       };
     };
 
-    meta = with pkgs.lib.maintainers; {
-      maintainers = [ euank ];
-    };
+    meta.maintainers = k3s.meta.maintainers;
 
     testScript = ''
       machines = [server, server2, agent]
diff --git a/nixpkgs/nixos/tests/k3s/single-node.nix b/nixpkgs/nixos/tests/k3s/single-node.nix
index e059603b9c9d..fd64a050e61e 100644
--- a/nixpkgs/nixos/tests/k3s/single-node.nix
+++ b/nixpkgs/nixos/tests/k3s/single-node.nix
@@ -25,9 +25,7 @@ import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
   in
   {
     name = "${k3s.name}-single-node";
-    meta = with pkgs.lib.maintainers; {
-      maintainers = [ euank ];
-    };
+    meta.maintainers = k3s.meta.maintainers;
 
     nodes.machine = { pkgs, ... }: {
       environment.systemPackages = with pkgs; [ k3s gzip ];
diff --git a/nixpkgs/nixos/tests/kavita.nix b/nixpkgs/nixos/tests/kavita.nix
index f27b3fffbcf6..bb55e1fb29d4 100644
--- a/nixpkgs/nixos/tests/kavita.nix
+++ b/nixpkgs/nixos/tests/kavita.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, ... }: {
   name = "kavita";
   meta = with pkgs.lib.maintainers; {
     maintainers = [ misterio77 ];
@@ -8,29 +8,35 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     kavita = { config, pkgs, ... }: {
       services.kavita = {
         enable = true;
-        port = 5000;
-        tokenKeyFile = builtins.toFile "kavita.key" "QfpjFvjT83BLtZ74GE3U3Q==";
+        tokenKeyFile = builtins.toFile "kavita.key" "d26ba694b455271a8872415830fb7b5c58f8da98f9ef7f58b2ca4c34bd406512";
       };
     };
   };
 
-  testScript = let
-    regUrl = "http://kavita:5000/api/Account/register";
-    payload = builtins.toFile "payload.json" (builtins.toJSON {
-      username = "foo";
-      password = "correcthorsebatterystaple";
-      email = "foo@bar";
-    });
-  in ''
-    kavita.start
-    kavita.wait_for_unit("kavita.service")
+  testScript =
+    let
+      regUrl = "http://kavita:5000/api/Account/register";
+      loginUrl = "http://kavita:5000/api/Account/login";
+      localeUrl = "http://kavita:5000/api/locale";
+    in
+    ''
+      import json
 
-    # Check that static assets are working
-    kavita.wait_until_succeeds("curl http://kavita:5000/site.webmanifest | grep Kavita")
+      kavita.start
+      kavita.wait_for_unit("kavita.service")
 
-    # Check that registration is working
-    kavita.succeed("curl -fX POST ${regUrl} --json @${payload}")
-    # But only for the first one
-    kavita.fail("curl -fX POST ${regUrl} --json @${payload}")
-  '';
+      # Check that static assets are working
+      kavita.wait_until_succeeds("curl http://kavita:5000/site.webmanifest | grep Kavita")
+
+      # Check that registration is working
+      kavita.succeed("""curl -fX POST ${regUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""")
+      # But only for the first one
+      kavita.fail("""curl -fX POST ${regUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""")
+
+      # Log in and retrieve token
+      session = json.loads(kavita.succeed("""curl -fX POST ${loginUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'"""))
+      # Check list of locales
+      locales = json.loads(kavita.succeed(f"curl -fX GET ${localeUrl} -H 'Authorization: Bearer {session['token']}'"))
+      assert len(locales) > 0, "expected a list of locales"
+    '';
 })
diff --git a/nixpkgs/nixos/tests/make-test-python.nix b/nixpkgs/nixos/tests/make-test-python.nix
index 32531fffd2bf..28569f1d2955 100644
--- a/nixpkgs/nixos/tests/make-test-python.nix
+++ b/nixpkgs/nixos/tests/make-test-python.nix
@@ -1,5 +1,5 @@
 f: {
-  system,
+  system ? builtins.currentSystem,
   pkgs ? import ../.. { inherit system; config = {}; overlays = []; },
   ...
 } @ args:
diff --git a/nixpkgs/nixos/tests/mollysocket.nix b/nixpkgs/nixos/tests/mollysocket.nix
new file mode 100644
index 000000000000..8cbd0c0272e0
--- /dev/null
+++ b/nixpkgs/nixos/tests/mollysocket.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  port = 1234;
+in {
+  name = "mollysocket";
+  meta.maintainers = with lib.maintainers; [ dotlambda ];
+
+  nodes.mollysocket = { ... }: {
+    services.mollysocket = {
+      enable = true;
+      settings = {
+        inherit port;
+      };
+    };
+  };
+
+  testScript = ''
+    import json
+
+    mollysocket.wait_for_unit("mollysocket.service")
+    mollysocket.wait_for_open_port(${toString port})
+
+    out = mollysocket.succeed("curl --fail http://127.0.0.1:${toString port}")
+    assert json.loads(out)["mollysocket"]["version"] == "${toString pkgs.mollysocket.version}"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/monetdb.nix b/nixpkgs/nixos/tests/monetdb.nix
new file mode 100644
index 000000000000..acbf01f76975
--- /dev/null
+++ b/nixpkgs/nixos/tests/monetdb.nix
@@ -0,0 +1,77 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+  let creds = pkgs.writeText ".monetdb" ''
+        user=monetdb
+        password=monetdb
+      '';
+      createUser = pkgs.writeText "createUser.sql" ''
+        CREATE USER "voc" WITH PASSWORD 'voc' NAME 'VOC Explorer' SCHEMA "sys";
+        CREATE SCHEMA "voc" AUTHORIZATION "voc";
+        ALTER USER "voc" SET SCHEMA "voc";
+      '';
+      credsVoc = pkgs.writeText ".monetdb" ''
+        user=voc
+        password=voc
+      '';
+      transaction = pkgs.writeText "transaction" ''
+        START TRANSACTION;
+        CREATE TABLE test (id int, data varchar(30));
+        ROLLBACK;
+      '';
+      vocData = pkgs.fetchzip {
+        url = "https://dev.monetdb.org/Assets/VOC/voc_dump.zip";
+        hash = "sha256-sQ5acTsSAiXQfOgt2PhN7X7Z9TZGZtLrPPxgQT2pCGQ=";
+      };
+      onboardPeople = pkgs.writeText "onboardPeople" ''
+        CREATE VIEW onboard_people AS
+        SELECT * FROM (
+        SELECT 'craftsmen' AS type, craftsmen.* FROM craftsmen
+        UNION ALL
+        SELECT 'impotenten' AS type, impotenten.* FROM impotenten
+        UNION ALL
+        SELECT 'passengers' AS type, passengers.* FROM passengers
+        UNION ALL
+        SELECT 'seafarers' AS type, seafarers.* FROM seafarers
+        UNION ALL
+        SELECT 'soldiers' AS type, soldiers.* FROM soldiers
+        UNION ALL
+        SELECT 'total' AS type, total.* FROM total
+        ) AS onboard_people_table;
+        SELECT type, COUNT(*) AS total
+        FROM onboard_people GROUP BY type ORDER BY type;
+      '';
+      onboardExpected = pkgs.lib.strings.replaceStrings ["\n"] ["\\n"] ''
+        +------------+-------+
+        | type       | total |
+        +============+=======+
+        | craftsmen  |  2349 |
+        | impotenten |   938 |
+        | passengers |  2813 |
+        | seafarers  |  4468 |
+        | soldiers   |  4177 |
+        | total      |  2467 |
+        +------------+-------+
+      '';
+  in {
+    name = "monetdb";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ StillerHarpo ];
+    };
+    nodes.machine.services.monetdb.enable = true;
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("monetdb")
+      machine.succeed("monetdbd create mydbfarm")
+      machine.succeed("monetdbd start mydbfarm")
+      machine.succeed("monetdb create voc")
+      machine.succeed("monetdb release voc")
+      machine.succeed("cp ${creds} ./.monetdb")
+      assert "hello world" in machine.succeed("mclient -d voc -s \"SELECT 'hello world'\"")
+      machine.succeed("mclient -d voc ${createUser}")
+      machine.succeed("cp ${credsVoc} ./.monetdb")
+      machine.succeed("mclient -d voc ${transaction}")
+      machine.succeed("mclient -d voc ${vocData}/voc_dump.sql")
+      assert "8131" in machine.succeed("mclient -d voc -s \"SELECT count(*) FROM voyages\"")
+      assert "${onboardExpected}" in machine.succeed("mclient -d voc ${onboardPeople}")
+
+  '';
+  })
diff --git a/nixpkgs/nixos/tests/nix-ld.nix b/nixpkgs/nixos/tests/nix-ld.nix
index 8733f5b0c397..9b851f88617a 100644
--- a/nixpkgs/nixos/tests/nix-ld.nix
+++ b/nixpkgs/nixos/tests/nix-ld.nix
@@ -1,17 +1,39 @@
-import ./make-test-python.nix ({ lib, pkgs, ...} :
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+  shared =
+    { config, pkgs, ... }:
+    {
+      programs.nix-ld.enable = true;
+      environment.systemPackages = [
+        (pkgs.runCommand "patched-hello" { } ''
+          install -D -m755 ${pkgs.hello}/bin/hello $out/bin/hello
+          patchelf $out/bin/hello --set-interpreter $(cat ${config.programs.nix-ld.package}/nix-support/ldpath)
+        '')
+      ];
+    };
+in
 {
-  name = "nix-ld";
-  nodes.machine = { pkgs, ... }: {
-    programs.nix-ld.enable = true;
-    environment.systemPackages = [
-      (pkgs.runCommand "patched-hello" {} ''
-        install -D -m755 ${pkgs.hello}/bin/hello $out/bin/hello
-        patchelf $out/bin/hello --set-interpreter $(cat ${pkgs.nix-ld}/nix-support/ldpath)
-      '')
-    ];
+  nix-ld = makeTest {
+    name = "nix-ld";
+    nodes.machine = shared;
+    testScript = ''
+      start_all()
+      machine.succeed("hello")
+    '';
   };
-  testScript = ''
-    start_all()
-    machine.succeed("hello")
- '';
-})
+  nix-ld-rs = makeTest {
+    name = "nix-ld-rs";
+    nodes.machine = {
+      imports = [ shared ];
+      programs.nix-ld.package = pkgs.nix-ld-rs;
+    };
+    testScript = ''
+      start_all()
+      machine.succeed("hello")
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/nixos-rebuild-install-bootloader.nix b/nixpkgs/nixos/tests/nixos-rebuild-install-bootloader.nix
index 3ade90ea24a7..94554a93bd63 100644
--- a/nixpkgs/nixos/tests/nixos-rebuild-install-bootloader.nix
+++ b/nixpkgs/nixos/tests/nixos-rebuild-install-bootloader.nix
@@ -60,7 +60,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
       # Need to run `nixos-rebuild` twice because the first run will install
       # GRUB anyway
       with subtest("Switch system again and install bootloader"):
-          result = machine.succeed("nixos-rebuild switch --install-bootloader")
+          result = machine.succeed("nixos-rebuild switch --install-bootloader 2>&1")
           # install-grub2.pl messages
           assert "updating GRUB 2 menu..." in result
           assert "installing the GRUB 2 boot loader on /dev/vda..." in result
diff --git a/nixpkgs/nixos/tests/photonvision.nix b/nixpkgs/nixos/tests/photonvision.nix
new file mode 100644
index 000000000000..2cadaa4bc02e
--- /dev/null
+++ b/nixpkgs/nixos/tests/photonvision.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+{
+  name = "photonvision";
+
+  nodes = {
+    machine = { pkgs, ... }: {
+      services.photonvision = {
+        enable = true;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("photonvision.service")
+    machine.wait_for_open_port(5800)
+  '';
+
+  meta.maintainers = with lib.maintainers; [ max-niederman ];
+})
+
diff --git a/nixpkgs/nixos/tests/scion/freestanding-deployment/README.rst b/nixpkgs/nixos/tests/scion/freestanding-deployment/README.rst
new file mode 100644
index 000000000000..b2448a2dc9ad
--- /dev/null
+++ b/nixpkgs/nixos/tests/scion/freestanding-deployment/README.rst
@@ -0,0 +1,12 @@
+This NixOS VM test implements the network topology outlined in https://github.com/scionproto/scion/blob/27983125bccac6b84d1f96f406853aab0e460405/doc/tutorials/deploy.rst#sample-scion-demo-topology. Below is an excerpt from that document:
+
+Sample SCION Demo Topology
+..........................
+
+The topology of the ISD includes the inter-AS connections to neighboring ASes, and defines the underlay IP/UDP addresses of services and routers running in this AS. This is specified in topology files - this guide later explains how to configure these files. A following graphic depicts the topology on a high level.
+
+.. figure:: https://github.com/scionproto/scion/raw/27983125bccac6b84d1f96f406853aab0e460405/doc/tutorials/deploy/SCION-deployment-guide.drawio.png
+   :width: 95 %
+   :figwidth: 100 %
+
+   *Figure 1 - Topology of the sample SCION demo environment. It consists of 1 ISD, 3 core ASes and 2 non-core ASes.*
diff --git a/nixpkgs/nixos/tests/scion/freestanding-deployment/default.nix b/nixpkgs/nixos/tests/scion/freestanding-deployment/default.nix
new file mode 100644
index 000000000000..0c9686fbfbad
--- /dev/null
+++ b/nixpkgs/nixos/tests/scion/freestanding-deployment/default.nix
@@ -0,0 +1,172 @@
+# implements https://github.com/scionproto/scion/blob/27983125bccac6b84d1f96f406853aab0e460405/doc/tutorials/deploy.rst
+import ../../make-test-python.nix ({ pkgs, ... }:
+let
+  trust-root-configuration-keys = pkgs.runCommand "generate-trc-keys.sh" {
+    buildInputs = [
+      pkgs.scion
+    ];
+  } ''
+    set -euo pipefail
+
+    mkdir /tmp/tutorial-scion-certs && cd /tmp/tutorial-scion-certs
+    mkdir AS{1..5}
+
+    # Create voting and root keys and (self-signed) certificates for core ASes
+    pushd AS1
+    scion-pki certificate create --not-after=3650d --profile=sensitive-voting <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 sensitive voting cert"}') sensitive-voting.pem sensitive-voting.key
+    scion-pki certificate create --not-after=3650d --profile=regular-voting <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 regular voting cert"}') regular-voting.pem regular-voting.key
+    scion-pki certificate create --not-after=3650d --profile=cp-root <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 cp root cert"}') cp-root.pem cp-root.key
+    popd
+
+    pushd AS2
+    scion-pki certificate create --not-after=3650d --profile=cp-root <(echo '{"isd_as": "42-ffaa:1:2", "common_name": "42-ffaa:1:2 cp root cert"}') cp-root.pem cp-root.key
+    popd
+
+    pushd AS3
+    scion-pki certificate create --not-after=3650d --profile=sensitive-voting <(echo '{"isd_as": "42-ffaa:1:3", "common_name": "42-ffaa:1:3 sensitive voting cert"}') sensitive-voting.pem sensitive-voting.key
+    scion-pki certificate create --not-after=3650d --profile=regular-voting <(echo '{"isd_as": "42-ffaa:1:3", "common_name": "42-ffaa:1:3 regular voting cert"}') regular-voting.pem regular-voting.key
+    popd
+
+    # Create the TRC (Trust Root Configuration)
+    mkdir tmp
+    echo '
+    isd = 42
+    description = "Demo ISD 42"
+    serial_version = 1
+    base_version = 1
+    voting_quorum = 2
+
+    core_ases = ["ffaa:1:1", "ffaa:1:2", "ffaa:1:3"]
+    authoritative_ases = ["ffaa:1:1", "ffaa:1:2", "ffaa:1:3"]
+    cert_files = ["AS1/sensitive-voting.pem", "AS1/regular-voting.pem", "AS1/cp-root.pem", "AS2/cp-root.pem", "AS3/sensitive-voting.pem", "AS3/regular-voting.pem"]
+
+    [validity]
+    not_before = '$(date +%s)'
+    validity = "365d"' \
+    > trc-B1-S1-pld.tmpl
+
+    scion-pki trc payload --out=tmp/ISD42-B1-S1.pld.der --template trc-B1-S1-pld.tmpl
+    rm trc-B1-S1-pld.tmpl
+
+    # Sign and bundle the TRC
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS1/sensitive-voting.{pem,key} --out tmp/ISD42-B1-S1.AS1-sensitive.trc
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS1/regular-voting.{pem,key} --out tmp/ISD42-B1-S1.AS1-regular.trc
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS3/sensitive-voting.{pem,key} --out tmp/ISD42-B1-S1.AS3-sensitive.trc
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS3/regular-voting.{pem,key} --out tmp/ISD42-B1-S1.AS3-regular.trc
+
+    scion-pki trc combine tmp/ISD42-B1-S1.AS{1,3}-{sensitive,regular}.trc --payload tmp/ISD42-B1-S1.pld.der --out ISD42-B1-S1.trc
+    rm tmp -r
+
+    # Create CA key and certificate for issuing ASes
+    pushd AS1
+    scion-pki certificate create --profile=cp-ca <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 CA cert"}') cp-ca.pem cp-ca.key --ca cp-root.pem --ca-key cp-root.key
+    popd
+    pushd AS2
+    scion-pki certificate create --profile=cp-ca <(echo '{"isd_as": "42-ffaa:1:2", "common_name": "42-ffaa:1:2 CA cert"}') cp-ca.pem cp-ca.key --ca cp-root.pem --ca-key cp-root.key
+    popd
+
+    # Create AS key and certificate chains
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 AS cert"}') AS1/cp-as.pem AS1/cp-as.key --ca AS1/cp-ca.pem --ca-key AS1/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:2", "common_name": "42-ffaa:1:2 AS cert"}') AS2/cp-as.pem AS2/cp-as.key --ca AS2/cp-ca.pem --ca-key AS2/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:3", "common_name": "42-ffaa:1:3 AS cert"}') AS3/cp-as.pem AS3/cp-as.key --ca AS1/cp-ca.pem --ca-key AS1/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:4", "common_name": "42-ffaa:1:4 AS cert"}') AS4/cp-as.pem AS4/cp-as.key --ca AS1/cp-ca.pem --ca-key AS1/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:5", "common_name": "42-ffaa:1:5 AS cert"}') AS5/cp-as.pem AS5/cp-as.key --ca AS2/cp-ca.pem --ca-key AS2/cp-ca.key --bundle
+
+    for i in {1..5}
+    do
+      mkdir -p $out/AS$i
+      cp AS$i/cp-as.{key,pem} $out/AS$i
+    done
+
+    mv *.trc $out
+  '';
+  imports = hostId: [
+    ({
+      services.scion = {
+        enable = true;
+        bypassBootstrapWarning = true;
+      };
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+      };
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "192.168.1.${toString hostId}/24";
+      };
+      environment.etc = {
+        "scion/topology.json".source = ./topology${toString hostId}.json;
+        "scion/crypto/as".source = trust-root-configuration-keys + "/AS${toString hostId}";
+        "scion/certs/ISD42-B1-S1.trc".source = trust-root-configuration-keys + "/ISD42-B1-S1.trc";
+        "scion/keys/master0.key".text = "U${toString hostId}v4k23ZXjGDwDofg/Eevw==";
+        "scion/keys/master1.key".text = "dBMko${toString hostId}qMS8DfrN/zP2OUdA==";
+      };
+      environment.systemPackages = [
+        pkgs.scion
+      ];
+    })
+  ];
+in
+{
+  name = "scion-test";
+  nodes = {
+    scion01 = { ... }: {
+      imports = (imports 1);
+    };
+    scion02 = { ... }: {
+      imports = (imports 2);
+    };
+    scion03 = { ... }: {
+      imports = (imports 3);
+    };
+    scion04 = { ... }: {
+      imports = (imports 4);
+    };
+    scion05 = { ... }: {
+      imports = (imports 5);
+    };
+  };
+  testScript = let
+    pingAll = pkgs.writeShellScript "ping-all-scion.sh" ''
+      addresses="42-ffaa:1:1 42-ffaa:1:2 42-ffaa:1:3 42-ffaa:1:4 42-ffaa:1:5"
+      timeout=100
+      wait_for_all() {
+        for as in "$@"
+        do
+          scion showpaths $as --no-probe > /dev/null || return 1
+        done
+        return 0
+      }
+      ping_all() {
+        for as in "$@"
+        do
+          scion ping "$as,127.0.0.1" -c 3 || return 1
+        done
+        return 0
+      }
+      for i in $(seq 0 $timeout); do
+        wait_for_all $addresses && ping_all $addresses && exit 0
+        sleep 1
+      done
+      exit 1
+    '';
+  in
+  ''
+    # List of AS instances
+    machines = [scion01, scion02, scion03, scion04, scion05]
+
+    # Wait for scion-control.service on all instances
+    for i in machines:
+        i.wait_for_unit("scion-control.service")
+
+    # Execute pingAll command on all instances
+    for i in machines:
+        i.succeed("${pingAll} >&2")
+
+    # Restart scion-dispatcher and ping again to test robustness
+    for i in machines:
+        i.succeed("systemctl restart scion-dispatcher >&2")
+        i.succeed("${pingAll} >&2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/scion/freestanding-deployment/topology1.json b/nixpkgs/nixos/tests/scion/freestanding-deployment/topology1.json
new file mode 100644
index 000000000000..de51515eebc2
--- /dev/null
+++ b/nixpkgs/nixos/tests/scion/freestanding-deployment/topology1.json
@@ -0,0 +1,51 @@
+{
+  "attributes": [
+    "core"
+  ],
+  "isd_as": "42-ffaa:1:1",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.1:50014",
+            "remote": "192.168.1.4:50014"
+          },
+          "isd_as": "42-ffaa:1:4",
+          "link_to": "child",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.1:50012",
+            "remote": "192.168.1.2:50012"
+          },
+          "isd_as": "42-ffaa:1:2",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "3": {
+          "underlay": {
+            "public": "192.168.1.1:50013",
+            "remote": "192.168.1.3:50013"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "core",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixpkgs/nixos/tests/scion/freestanding-deployment/topology2.json b/nixpkgs/nixos/tests/scion/freestanding-deployment/topology2.json
new file mode 100644
index 000000000000..f8e10d5d1f75
--- /dev/null
+++ b/nixpkgs/nixos/tests/scion/freestanding-deployment/topology2.json
@@ -0,0 +1,51 @@
+{
+  "attributes": [
+    "core"
+  ],
+  "isd_as": "42-ffaa:1:2",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.2:50012",
+            "remote": "192.168.1.1:50012"
+          },
+          "isd_as": "42-ffaa:1:1",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.2:50023",
+            "remote": "192.168.1.3:50023"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "3": {
+          "underlay": {
+            "public": "192.168.1.2:50025",
+            "remote": "192.168.1.5:50025"
+          },
+          "isd_as": "42-ffaa:1:5",
+          "link_to": "child",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixpkgs/nixos/tests/scion/freestanding-deployment/topology3.json b/nixpkgs/nixos/tests/scion/freestanding-deployment/topology3.json
new file mode 100644
index 000000000000..53cee431885b
--- /dev/null
+++ b/nixpkgs/nixos/tests/scion/freestanding-deployment/topology3.json
@@ -0,0 +1,60 @@
+{
+  "attributes": [
+    "core"
+  ],
+  "isd_as": "42-ffaa:1:3",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.3:50013",
+            "remote": "192.168.1.1:50013"
+          },
+          "isd_as": "42-ffaa:1:1",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.3:50023",
+            "remote": "192.168.1.2:50023"
+          },
+          "isd_as": "42-ffaa:1:2",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "3": {
+          "underlay": {
+            "public": "192.168.1.3:50034",
+            "remote": "192.168.1.4:50034"
+          },
+          "isd_as": "42-ffaa:1:4",
+          "link_to": "child",
+          "mtu": 1472
+        },
+        "4": {
+          "underlay": {
+            "public": "192.168.1.3:50035",
+            "remote": "192.168.1.5:50035"
+          },
+          "isd_as": "42-ffaa:1:5",
+          "link_to": "child",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixpkgs/nixos/tests/scion/freestanding-deployment/topology4.json b/nixpkgs/nixos/tests/scion/freestanding-deployment/topology4.json
new file mode 100644
index 000000000000..03c507a4daf5
--- /dev/null
+++ b/nixpkgs/nixos/tests/scion/freestanding-deployment/topology4.json
@@ -0,0 +1,40 @@
+{
+  "attributes": [],
+  "isd_as": "42-ffaa:1:4",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.4:50014",
+            "remote": "192.168.1.1:50014"
+          },
+          "isd_as": "42-ffaa:1:1",
+          "link_to": "parent",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.4:50034",
+            "remote": "192.168.1.3:50034"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "parent",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixpkgs/nixos/tests/scion/freestanding-deployment/topology5.json b/nixpkgs/nixos/tests/scion/freestanding-deployment/topology5.json
new file mode 100644
index 000000000000..6114c1f73c2a
--- /dev/null
+++ b/nixpkgs/nixos/tests/scion/freestanding-deployment/topology5.json
@@ -0,0 +1,40 @@
+{
+  "attributes": [],
+  "isd_as": "42-ffaa:1:5",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.5:50025",
+            "remote": "192.168.1.2:50025"
+          },
+          "isd_as": "42-ffaa:1:2",
+          "link_to": "parent",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.5:50035",
+            "remote": "192.168.1.3:50035"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "parent",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixpkgs/nixos/tests/systemd-user-linger.nix b/nixpkgs/nixos/tests/systemd-user-linger.nix
new file mode 100644
index 000000000000..2c3d71668979
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-user-linger.nix
@@ -0,0 +1,39 @@
+import ./make-test-python.nix (
+  { lib, ... }:
+  {
+    name = "systemd-user-linger";
+
+    nodes.machine =
+      { ... }:
+      {
+        users.users = {
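+          # alice lingers (her user manager starts at boot); bob does not.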
+          alice = {
+            isNormalUser = true;
+            linger = true;
+            uid = 1000;
+          };
+
+          bob = {
+            isNormalUser = true;
+            linger = false;
+            uid = 10001;
+          };
+        };
+      };
+
+    testScript =
+      { ... }:
+      ''
+        machine.wait_for_file("/var/lib/systemd/linger/alice")
+        machine.succeed("systemctl status user-1000.slice")
+
+        machine.fail("test -e /var/lib/systemd/linger/bob")
+        machine.fail("systemctl status user-1001.slice")
+
+        with subtest("missing users have linger purged"):
+            machine.succeed("touch /var/lib/systemd/linger/missing")
+            machine.systemctl("restart linger-users")
+            machine.succeed("test ! -e /var/lib/systemd/linger/missing")
+      '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/tracee.nix b/nixpkgs/nixos/tests/tracee.nix
index 3dadc0f9fdb3..1c241f3ec498 100644
--- a/nixpkgs/nixos/tests/tracee.nix
+++ b/nixpkgs/nixos/tests/tracee.nix
@@ -1,7 +1,13 @@
-import ./make-test-python.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, ... }: rec {
   name = "tracee-integration";
   meta.maintainers = pkgs.tracee.meta.maintainers;
 
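+  # Builds a minimal "hello-world" Docker image; exposed via passthru (hence
+  # `rec`) so the testScript below can load it into the Docker daemon.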
+  passthru.hello-world-builder = pkgs: pkgs.dockerTools.buildImage {
+    name = "hello-world";
+    tag = "latest";
+    config.Cmd = [ "${pkgs.hello}/bin/hello" ];
+  };
+
   nodes = {
     machine = { config, pkgs, ... }: {
       # EventFilters/trace_only_events_from_new_containers and
@@ -12,57 +18,48 @@ import ./make-test-python.nix ({ pkgs, ... }: {
       environment.systemPackages = with pkgs; [
         # required by Test_EventFilters/trace_events_from_ls_and_which_binary_in_separate_scopes
         which
-        # build the go integration tests as a binary
-        (tracee.overrideAttrs (oa: {
-          pname = oa.pname + "-integration";
-          postPatch = oa.postPatch or "" + ''
-            # prepare tester.sh (which will be embedded in the test binary)
-            patchShebangs tests/integration/tester.sh
-
-            # fix the test to look at nixos paths for running programs
-            substituteInPlace tests/integration/integration_test.go \
-              --replace "bin=/usr/bin/" "comm=" \
-              --replace "binary=/usr/bin/" "comm=" \
-              --replace "/usr/bin/dockerd" "dockerd" \
-              --replace "/usr/bin" "/run/current-system/sw/bin"
-          '';
-          nativeBuildInputs = oa.nativeBuildInputs or [ ] ++ [ makeWrapper ];
-          buildPhase = ''
-            runHook preBuild
-            # just build the static lib we need for the go test binary
-            make $makeFlags ''${enableParallelBuilding:+-j$NIX_BUILD_CORES} bpf-core ./dist/btfhub
-
-            # then compile the tests to be ran later
-            CGO_LDFLAGS="$(pkg-config --libs libbpf)" go test -tags core,ebpf,integration -p 1 -c -o $GOPATH/tracee-integration ./tests/integration/...
-            runHook postBuild
-          '';
-          doCheck = false;
-          outputs = [ "out" ];
-          installPhase = ''
-            mkdir -p $out/bin
-            mv $GOPATH/tracee-integration $out/bin/
-          '';
-          doInstallCheck = false;
-
-          meta = oa.meta // {
-            outputsToInstall = [];
-          };
-        }))
+        # the prebuilt go integration test binary
+        tracee.passthru.tests.integration-test-cli
       ];
     };
   };
 
-  testScript = ''
-    machine.wait_for_unit("docker.service")
+  testScript =
+    let
+      skippedTests = [
+        # These comm tests do not resolve for an unknown reason. They pass when
+        # the policies are replicated and tracee is run manually, but fail in
+        # the integration test, both with the automatic run and when the
+        # commands are executed by hand while the test is searching.
+        "Test_EventFilters/comm:_event:_args:_trace_event_set_in_a_specific_policy_with_args_from_ls_command"
+        "Test_EventFilters/comm:_event:_trace_events_set_in_two_specific_policies_from_ls_and_uname_commands"
+
+        # worked at some point, but appears to be flaky
+        "Test_EventFilters/pid:_event:_args:_trace_event_sched_switch_with_args_from_pid_0"
+      ];
+    in
+    ''
+      with subtest("prepare for integration tests"):
+        machine.wait_for_unit("docker.service")
+        machine.succeed('which bash')
+
+        # EventFilters/trace_only_events_from_new_containers also requires a container called "hello-world"
+        machine.succeed('docker load < ${passthru.hello-world-builder pkgs}')
 
-    with subtest("run integration tests"):
-      # EventFilters/trace_only_events_from_new_containers also requires a container called "alpine"
-      machine.succeed('tar c -C ${pkgs.pkgsStatic.busybox} . | docker import - alpine --change "ENTRYPOINT [\"sleep\"]"')
+        # exec= needs fully resolved paths
+        machine.succeed(
+          'mkdir /tmp/testdir',
+          'cp $(which who) /tmp/testdir/who',
+          'cp $(which uname) /tmp/testdir/uname',
+        )
 
-      # Test_EventFilters/trace_event_set_in_a_specific_scope expects to be in a dir that includes "integration"
-      print(machine.succeed(
-        'mkdir /tmp/integration',
-        'cd /tmp/integration && tracee-integration -test.v'
-      ))
-  '';
+      with subtest("run integration tests"):
+        # Test_EventFilters/trace_event_set_in_a_specific_scope expects to be in a dir that includes "integration"
+        # tests must be run with a single process (-test.parallel 1)
+        print(machine.succeed(
+          'mkdir /tmp/integration',
+          'cd /tmp/integration && export PATH="/tmp/testdir:$PATH" && integration.test -test.v -test.parallel 1 -test.skip="^${builtins.concatStringsSep "$|^" skippedTests}$"'
+        ))
+    '';
 })
diff --git a/nixpkgs/nixos/tests/ustreamer.nix b/nixpkgs/nixos/tests/ustreamer.nix
new file mode 100644
index 000000000000..1354eb03a326
--- /dev/null
+++ b/nixpkgs/nixos/tests/ustreamer.nix
@@ -0,0 +1,75 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "ustreamer-vmtest";
+  nodes = {
+    client = {...}: {
+      environment.systemPackages = [ pkgs.curl ];
+    };
+    camera = {config, ...}: let
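+      # akvcam config: /dev/video7 is the virtual output (feeder) device and
+      # /dev/video9 is the capture device that ustreamer serves over HTTP.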
+      configFile = pkgs.writeText "akvcam-configFile" ''
+        [Cameras]
+        cameras/size = 2
+
+        cameras/1/type = output
+        cameras/1/mode = mmap, userptr, rw
+        cameras/1/description = Virtual Camera (output device)
+        cameras/1/formats = 2
+        cameras/1/videonr = 7
+
+        cameras/2/type = capture
+        cameras/2/mode = mmap, rw
+        cameras/2/description = Virtual Camera
+        cameras/2/formats = 1, 2
+        cameras/2/videonr = 9
+
+        [Connections]
+        connections/size = 1
+        connections/1/connection = 1:2
+
+        [Formats]
+        formats/size = 2
+
+        formats/1/format = YUY2
+        formats/1/width = 640
+        formats/1/height = 480
+        formats/1/fps = 30
+
+        formats/2/format = RGB24, YUY2
+        formats/2/width = 640
+        formats/2/height = 480
+        formats/2/fps = 20/1, 15/2
+      '';
+    in {
+      environment.systemPackages = [ pkgs.ustreamer ];
+      networking.firewall.enable = false;
+      systemd.services.ustreamer = {
+        description = "ustreamer service";
+        wantedBy = ["multi-user.target"];
+        serviceConfig = {
+          DynamicUser = true;
+          ExecStart = "${pkgs.ustreamer}/bin/ustreamer --host=0.0.0.0 --port 8000 --device /dev/video9 --device-timeout=8";
+          PrivateTmp = true;
+          BindReadOnlyPaths = "/dev/video9";
+          SupplementaryGroups = [
+            "video"
+          ];
+          Restart = "always";
+        };
+      };
+      boot.extraModulePackages = [config.boot.kernelPackages.akvcam];
+      boot.kernelModules = ["akvcam"];
+      boot.extraModprobeConfig = ''
+        options akvcam config_file=${configFile}
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    camera.wait_for_unit("ustreamer.service")
+    camera.wait_for_open_port(8000)
+
+    client.wait_for_unit("multi-user.target")
+    client.succeed("curl http://camera:8000")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/web-apps/peertube.nix b/nixpkgs/nixos/tests/web-apps/peertube.nix
index 0e5f39c08a02..83c7cf03701e 100644
--- a/nixpkgs/nixos/tests/web-apps/peertube.nix
+++ b/nixpkgs/nixos/tests/web-apps/peertube.nix
@@ -17,16 +17,18 @@ import ../make-test-python.nix ({pkgs, ...}:
       services.postgresql = {
         enable = true;
         enableTCPIP = true;
+        ensureDatabases = [ "peertube_test" ];
+        ensureUsers = [
+          {
+            name = "peertube_test";
+            ensureDBOwnership = true;
+          }
+        ];
         authentication = ''
-          hostnossl peertube_local peertube_test 192.168.2.11/32 md5
+          hostnossl peertube_test peertube_test 192.168.2.11/32 md5
         '';
         initialScript = pkgs.writeText "postgresql_init.sql" ''
           CREATE ROLE peertube_test LOGIN PASSWORD '0gUN0C1mgST6czvjZ8T9';
-          CREATE DATABASE peertube_local TEMPLATE template0 ENCODING UTF8;
-          GRANT ALL PRIVILEGES ON DATABASE peertube_local TO peertube_test;
-          \connect peertube_local
-          CREATE EXTENSION IF NOT EXISTS pg_trgm;
-          CREATE EXTENSION IF NOT EXISTS unaccent;
         '';
       };
 
@@ -41,6 +43,9 @@ import ../make-test-python.nix ({pkgs, ...}:
     server = { pkgs, ... }: {
       environment = {
         etc = {
+          "peertube/password-init-root".text = ''
+            PT_INITIAL_ROOT_PASSWORD=zw4SqYVdcsXUfRX8aaFX
+          '';
           "peertube/secrets-peertube".text = ''
             063d9c60d519597acef26003d5ecc32729083965d09181ef3949200cbe5f09ee
           '';
@@ -70,13 +75,15 @@ import ../make-test-python.nix ({pkgs, ...}:
         localDomain = "peertube.local";
         enableWebHttps = false;
 
+        serviceEnvironmentFile = "/etc/peertube/password-init-root";
+
         secrets = {
           secretsFile = "/etc/peertube/secrets-peertube";
         };
 
         database = {
           host = "192.168.2.10";
-          name = "peertube_local";
+          name = "peertube_test";
           user = "peertube_test";
           passwordFile = "/etc/peertube/password-posgressql-db";
         };
@@ -99,7 +106,7 @@ import ../make-test-python.nix ({pkgs, ...}:
     };
 
     client = {
-      environment.systemPackages = [ pkgs.jq ];
+      environment.systemPackages = [ pkgs.jq pkgs.peertube.cli ];
       networking = {
        interfaces.eth1 = {
           ipv4.addresses = [
@@ -130,7 +137,10 @@ import ../make-test-python.nix ({pkgs, ...}:
     client.succeed("curl --fail http://peertube.local:9000/api/v1/config/about | jq -r '.instance.name' | grep 'PeerTube\ Test\ Server'")
 
     # Check PeerTube CLI version
-    assert "${pkgs.peertube.version}" in server.succeed('su - peertube -s /bin/sh -c "peertube --version"')
+    client.succeed('peertube-cli auth add -u "http://peertube.local:9000" -U "root" --password "zw4SqYVdcsXUfRX8aaFX"')
+    client.succeed('peertube-cli auth list | grep "http://peertube.local:9000"')
+    client.succeed('peertube-cli auth del "http://peertube.local:9000"')
+    client.fail('peertube-cli auth list | grep "http://peertube.local:9000"')
 
     client.shutdown()
     server.shutdown()