author     Alyssa Ross <hi@alyssa.is>    2024-02-26 16:20:28 +0100
committer  Alyssa Ross <hi@alyssa.is>    2024-02-26 16:20:28 +0100
commit     647438344bfc1f77791391e2b4f98eef865c63dc (patch)
tree       ef580867fc6cc413940e4330d939cf1afda082cb /nixpkgs/nixos
parent     b084c6a0fab7f32c904c5c8e8db8dddcefbe507f (diff)
parent     e3474e1d1e53b70e2b2af73ea26d6340e82f6b8b (diff)
Merge commit 'e3474e1d1e53'
Diffstat (limited to 'nixpkgs/nixos')
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2405.section.md | 24
-rw-r--r--  nixpkgs/nixos/lib/make-disk-image.nix | 3
-rw-r--r--  nixpkgs/nixos/lib/test-driver/pyproject.toml | 1
-rw-r--r--  nixpkgs/nixos/modules/config/nix.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/config/no-x-libs.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/hardware/printers.nix | 27
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel-no-zfs.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-no-zfs-installer.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/misc/nixpkgs-flake.nix | 105
-rw-r--r--  nixpkgs/nixos/modules/module-list.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/programs/ccache.nix | 36
-rw-r--r--  nixpkgs/nixos/modules/programs/steam.nix | 18
-rw-r--r--  nixpkgs/nixos/modules/security/ca.nix | 14
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix | 59
-rw-r--r--  nixpkgs/nixos/modules/services/development/lorri.nix | 3
-rw-r--r--  nixpkgs/nixos/modules/services/games/archisteamfarm.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix | 39
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix | 38
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/pcscd.nix | 9
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/thinkfan.nix | 9
-rw-r--r--  nixpkgs/nixos/modules/services/misc/atuin.nix | 14
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix | 51
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/scrutiny.nix | 221
-rw-r--r--  nixpkgs/nixos/modules/services/networking/bee-clef.nix | 107
-rw-r--r--  nixpkgs/nixos/modules/services/networking/bee.nix | 11
-rw-r--r--  nixpkgs/nixos/modules/services/networking/sabnzbd.nix | 34
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/zope2.nix | 262
-rw-r--r--  nixpkgs/nixos/modules/services/x11/desktop-managers/deepin.nix | 9
-rw-r--r--  nixpkgs/nixos/modules/system/boot/stage-1.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/system/boot/systemd/initrd.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/system/boot/uki.nix | 17
-rw-r--r--  nixpkgs/nixos/modules/system/etc/etc.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems.nix | 21
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/apfs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/bcachefs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/btrfs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/cifs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/ecryptfs.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/erofs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/exfat.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/ext.nix | 6
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/f2fs.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/glusterfs.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/jfs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/nfs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/ntfs.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/reiserfs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/squashfs.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/sshfs.nix | 10
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/unionfs-fuse.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/vboxsf.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/vfat.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/xfs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/tasks/filesystems/zfs.nix | 4
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/containers.nix | 70
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/cri-o.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/docker.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/hyperv-image.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/linode-config.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/lxc-container.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/oci-containers.nix | 5
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/podman/default.nix | 10
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/qemu-vm.nix | 6
-rw-r--r--  nixpkgs/nixos/modules/virtualisation/vmware-image.nix | 1
-rw-r--r--  nixpkgs/nixos/tests/activation/etc-overlay-immutable.nix | 14
-rw-r--r--  nixpkgs/nixos/tests/activation/etc-overlay-mutable.nix | 18
-rw-r--r--  nixpkgs/nixos/tests/all-tests.nix | 2
-rw-r--r--  nixpkgs/nixos/tests/ccache.nix | 24
-rw-r--r--  nixpkgs/nixos/tests/geoserver.nix | 65
-rw-r--r--  nixpkgs/nixos/tests/incus/container.nix | 4
-rw-r--r--  nixpkgs/nixos/tests/incus/default.nix | 6
-rw-r--r--  nixpkgs/nixos/tests/installer.nix | 3
-rw-r--r--  nixpkgs/nixos/tests/matomo.nix | 4
-rw-r--r--  nixpkgs/nixos/tests/morph-browser.nix | 53
-rw-r--r--  nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix | 6
-rw-r--r--  nixpkgs/nixos/tests/power-profiles-daemon.nix | 21
-rw-r--r--  nixpkgs/nixos/tests/scrutiny.nix | 70
-rw-r--r--  nixpkgs/nixos/tests/systemd-boot.nix | 26
79 files changed, 1105 insertions, 599 deletions
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2405.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2405.section.md
index be754f99713b..b5973c19a2c4 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-2405.section.md
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2405.section.md
@@ -20,11 +20,23 @@ In addition to numerous new and upgraded packages, this release has the followin
    - This can be disabled through the `environment.stub-ld.enable` option.
    - If you use `programs.nix-ld.enable`, no changes are needed. The stub will be disabled automatically.
 
+- On flake-based NixOS configurations using `nixpkgs.lib.nixosSystem`, NixOS will automatically set `NIX_PATH` and the system-wide flake registry (`/etc/nix/registry.json`) to point `<nixpkgs>` and the unqualified flake path `nixpkgs` to the version of nixpkgs used to build the system.
+
+  This makes `nix run nixpkgs#hello` and `nix-build '<nixpkgs>' -A hello` work out of the box with no added configuration, reusing dependencies already on the system.
+
+  This may be undesirable if nix commands are not going to be run on the built system since it adds nixpkgs to the system closure. For such closure-size-constrained non-interactive systems, this setting should be disabled.
+
+  To disable this, set [nixpkgs.flake.setNixPath](#opt-nixpkgs.flake.setNixPath) and [nixpkgs.flake.setFlakeRegistry](#opt-nixpkgs.flake.setFlakeRegistry) to false.
+
 - Julia environments can now be built with arbitrary packages from the ecosystem using the `.withPackages` function. For example: `julia.withPackages ["Plots"]`.
 
 - A new option `systemd.sysusers.enable` was added. If enabled, users and
   groups are created with systemd-sysusers instead of with a custom perl script.
 
+- A new option `virtualisation.containers.cdi` was added. It contains `static` and `dynamic` attributes (corresponding to `/etc/cdi` and `/run/cdi` respectively) to configure the Container Device Interface (CDI).
+
+- The `virtualisation.docker.enableNvidia` and `virtualisation.podman.enableNvidia` options are deprecated. `virtualisation.containers.cdi.dynamic.nvidia.enable` should be used instead. This option exposes GPUs to containers via the `--device` CLI option. This is supported by Docker 25, Podman 3.2.0 and Singularity 4. Any container runtime that supports the CDI specification can take advantage of this feature.
+
 - A new option `system.etc.overlay.enable` was added. If enabled, `/etc` is
   mounted via an overlayfs instead of being created by a custom perl script.
 
@@ -81,6 +93,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - [RustDesk](https://rustdesk.com), a full-featured open source remote control alternative for self-hosting and security with minimal configuration. Alternative to TeamViewer.
 
+- [Scrutiny](https://github.com/AnalogJ/scrutiny), a S.M.A.R.T. monitoring tool for hard disks with a web frontend.
+
 - [systemd-lock-handler](https://git.sr.ht/~whynothugo/systemd-lock-handler/), a bridge between logind D-Bus events and systemd targets. Available as [services.systemd-lock-handler.enable](#opt-services.systemd-lock-handler.enable).
 
 ## Backward Incompatibilities {#sec-release-24.05-incompatibilities}
@@ -105,6 +119,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - `nitter` requires a `guest_accounts.jsonl` to be provided as a path or loaded into the default location at `/var/lib/nitter/guest_accounts.jsonl`. See [Guest Account Branch Deployment](https://github.com/zedeus/nitter/wiki/Guest-Account-Branch-Deployment) for details.
 
+- `boot.supportedFilesystems` and `boot.initrd.supportedFilesystems` are now attribute sets instead of lists. Assignment from lists is still supported, but checking whether a filesystem is enabled must now be done using `supportedFilesystems.fs or false` instead of `lib.elem "fs" supportedFilesystems` as was done previously. (A short illustrative sketch follows this file's diff.)
+
 - `services.aria2.rpcSecret` has been replaced with `services.aria2.rpcSecretFile`.
   This was done so that secrets aren't stored in the world-readable nix store.
  To migrate, you will have to create a file with the same exact string, and change
@@ -155,6 +171,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
   - The bundled Lua was updated to Lua v5.2, which includes breaking changes. See the [Lua manual](https://www.lua.org/manual/5.2/manual.html#8) for more information.
   - The WebSocket API [was rewritten](https://github.com/MCJack123/craftos2/issues/337), which introduced breaking changes.
 
+- The `gtest` package has been updated past v1.13.0, which requires C++14 or higher.
+
 - The latest available version of Nextcloud is v28 (available as `pkgs.nextcloud28`). The installation logic is as follows:
   - If [`services.nextcloud.package`](#opt-services.nextcloud.package) is specified explicitly, this package will be installed (**recommended**)
   - If [`system.stateVersion`](#opt-system.stateVersion) is >=24.05, `pkgs.nextcloud28` will be installed by default.
@@ -165,12 +183,18 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - The `cudaPackages` package scope has been updated to `cudaPackages_12`.
 
+- Ada packages (libraries and tools) have been moved into the `gnatPackages` scope. `gnatPackages` uses the default GNAT compiler, `gnat12Packages` and `gnat13Packages` use the respective matching compiler version.
+
+- `spark2014` has been renamed to `gnatprove`. A version of `gnatprove` matching different GNAT versions is available from the different `gnatPackages` sets.
+
 - `services.resolved.fallbackDns` can now be used to disable the upstream fallback servers entirely by setting it to an empty list. To get the previous behaviour of the upstream defaults, set it to null (the new default) instead.
 
 - `xxd` has been moved from `vim` default output to its own output to reduce closure size. The canonical way to reference it across all platforms is `unixtools.xxd`.
 
 - The `stalwart-mail` package has been updated to v0.5.3, which includes [breaking changes](https://github.com/stalwartlabs/mail-server/blob/v0.5.3/UPGRADING.md).
 
+- `services.zope2` has been removed, as `zope2` is unmaintained and relied on Python 2.
+
 - `services.avahi.nssmdns` got split into `services.avahi.nssmdns4` and `services.avahi.nssmdns6` which enable the mDNS NSS switch for IPv4 and IPv6 respectively.
   Since most mDNS responders only register IPv4 addresses, most users want to keep the IPv6 support disabled to avoid long timeouts.
 
diff --git a/nixpkgs/nixos/lib/make-disk-image.nix b/nixpkgs/nixos/lib/make-disk-image.nix
index 1a33abd01ea1..047e72e2ac0d 100644
--- a/nixpkgs/nixos/lib/make-disk-image.nix
+++ b/nixpkgs/nixos/lib/make-disk-image.nix
@@ -536,6 +536,9 @@ let format' = format; in let
         concatStringsSep " " (lib.optional useEFIBoot "-drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}"
         ++ lib.optionals touchEFIVars [
           "-drive if=pflash,format=raw,unit=1,file=$efiVars"
+        ] ++ lib.optionals (OVMF.systemManagementModeRequired or false) [
+          "-machine" "q35,smm=on"
+          "-global" "driver=cfi.pflash01,property=secure,value=on"
         ]
       );
       inherit memSize;
diff --git a/nixpkgs/nixos/lib/test-driver/pyproject.toml b/nixpkgs/nixos/lib/test-driver/pyproject.toml
index 8638f14dfdae..17b7130a4bad 100644
--- a/nixpkgs/nixos/lib/test-driver/pyproject.toml
+++ b/nixpkgs/nixos/lib/test-driver/pyproject.toml
@@ -37,7 +37,6 @@ target-version = ['py39']
 include = '\.pyi?$'
 
 [tool.mypy]
-python_version = "3.10"
 warn_redundant_casts = true
 disallow_untyped_calls = true
 disallow_untyped_defs = true
diff --git a/nixpkgs/nixos/modules/config/nix.nix b/nixpkgs/nixos/modules/config/nix.nix
index 2769d8b25ef6..e6a74bbb73fc 100644
--- a/nixpkgs/nixos/modules/config/nix.nix
+++ b/nixpkgs/nixos/modules/config/nix.nix
@@ -1,5 +1,5 @@
 /*
-  Manages /etc/nix.conf.
+  Manages /etc/nix/nix.conf.
 
   See also
    - ./nix-channel.nix
diff --git a/nixpkgs/nixos/modules/config/no-x-libs.nix b/nixpkgs/nixos/modules/config/no-x-libs.nix
index 32b17f6059ef..870b3fe77cca 100644
--- a/nixpkgs/nixos/modules/config/no-x-libs.nix
+++ b/nixpkgs/nixos/modules/config/no-x-libs.nix
@@ -67,7 +67,7 @@ with lib;
       networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; };
       pango = super.pango.override { x11Support = false; };
       pinentry = super.pinentry.override { enabledFlavors = [ "curses" "tty" "emacs" ]; withLibsecret = false; };
-      pipewire = super.pipewire.override { x11Support = false; };
+      pipewire = super.pipewire.override { vulkanSupport = false; x11Support = false; };
       pythonPackagesExtensions = super.pythonPackagesExtensions ++ [
         (python-final: python-prev: {
           # tk feature requires wayland which fails to compile
diff --git a/nixpkgs/nixos/modules/hardware/printers.nix b/nixpkgs/nixos/modules/hardware/printers.nix
index 846ff6f3fb4f..4fb6a192cdd2 100644
--- a/nixpkgs/nixos/modules/hardware/printers.nix
+++ b/nixpkgs/nixos/modules/hardware/printers.nix
@@ -2,18 +2,23 @@
 with lib;
 let
   cfg = config.hardware.printers;
-  ppdOptionsString = options: optionalString (options != {})
-    (concatStringsSep " "
-      (mapAttrsToList (name: value: "-o '${name}'='${value}'") options)
-    );
-  ensurePrinter = p: ''
-    ${pkgs.cups}/bin/lpadmin -p '${p.name}' -E \
-      ${optionalString (p.location != null) "-L '${p.location}'"} \
-      ${optionalString (p.description != null) "-D '${p.description}'"} \
-      -v '${p.deviceUri}' \
-      -m '${p.model}' \
-      ${ppdOptionsString p.ppdOptions}
+
+  ensurePrinter = p: let
+    args = cli.toGNUCommandLineShell {} ({
+      p = p.name;
+      v = p.deviceUri;
+      m = p.model;
+    } // optionalAttrs (p.location != null) {
+      L = p.location;
+    } // optionalAttrs (p.description != null) {
+      D = p.description;
+    } // optionalAttrs (p.ppdOptions != {}) {
+      o = mapAttrsToList (name: value: "'${name}'='${value}'") p.ppdOptions;
+    });
+  in ''
+    ${pkgs.cups}/bin/lpadmin ${args} -E
   '';
+
   ensureDefaultPrinter = name: ''
     ${pkgs.cups}/bin/lpadmin -d '${name}'
   '';
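For context, a hedged usage sketch of the `hardware.printers.ensurePrinters` interface this helper backs (the fields mirror the `p.*` attributes used above; all concrete values are illustrative):

  {
    hardware.printers.ensurePrinters = [
      {
        name = "office-laser";
        deviceUri = "ipp://192.168.1.50/ipp/print";
        model = "everywhere";
        location = "Office";           # optional, rendered as -L
        description = "Office laser";  # optional, rendered as -D
        ppdOptions = { PageSize = "A4"; };  # rendered as -o 'PageSize'='A4'
      }
    ];
  }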
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel-no-zfs.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel-no-zfs.nix
index 9d09cdbe0206..fc3cb08bdbbb 100644
--- a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel-no-zfs.nix
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel-no-zfs.nix
@@ -1,15 +1,7 @@
-{ pkgs, ... }:
+{ lib, ... }:
 
 {
   imports = [ ./installation-cd-minimal-new-kernel.nix ];
 
-  # Makes `availableOn` fail for zfs, see <nixos/modules/profiles/base.nix>.
-  # This is a workaround since we cannot remove the `"zfs"` string from `supportedFilesystems`.
-  # The proper fix would be to make `supportedFilesystems` an attrset with true/false which we
-  # could then `lib.mkForce false`
-  nixpkgs.overlays = [(final: super: {
-    zfs = super.zfs.overrideAttrs(_: {
-      meta.platforms = [];
-    });
-  })];
+  boot.supportedFilesystems.zfs = lib.mkForce false;
 }
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-no-zfs-installer.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-no-zfs-installer.nix
index 0e5055960294..da5410057887 100644
--- a/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-no-zfs-installer.nix
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-no-zfs-installer.nix
@@ -1,15 +1,7 @@
-{ pkgs, ... }:
+{ lib, ... }:
 
 {
   imports = [ ./sd-image-aarch64-new-kernel-installer.nix ];
 
-  # Makes `availableOn` fail for zfs, see <nixos/modules/profiles/base.nix>.
-  # This is a workaround since we cannot remove the `"zfs"` string from `supportedFilesystems`.
-  # The proper fix would be to make `supportedFilesystems` an attrset with true/false which we
-  # could then `lib.mkForce false`
-  nixpkgs.overlays = [(final: super: {
-    zfs = super.zfs.overrideAttrs(_: {
-      meta.platforms = [];
-    });
-  })];
+  boot.supportedFilesystems.zfs = lib.mkForce false;
 }
diff --git a/nixpkgs/nixos/modules/misc/nixpkgs-flake.nix b/nixpkgs/nixos/modules/misc/nixpkgs-flake.nix
new file mode 100644
index 000000000000..8bfe05ca1994
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/nixpkgs-flake.nix
@@ -0,0 +1,105 @@
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.nixpkgs.flake;
+in
+{
+  options.nixpkgs.flake = {
+    source = mkOption {
+      # In newer Nix versions, particularly with lazy trees, outPath of
+      # flakes becomes a Nix-language path object. We deliberately allow this
+      # to gracefully come through the interface in discussion with @roberth.
+      #
+      # See: https://github.com/NixOS/nixpkgs/pull/278522#discussion_r1460292639
+      type = types.nullOr (types.either types.str types.path);
+
+      default = null;
+      defaultText = "if (using nixpkgsFlake.lib.nixosSystem) then self.outPath else null";
+
+      example = ''builtins.fetchTarball { name = "source"; sha256 = "${lib.fakeHash}"; url = "https://github.com/nixos/nixpkgs/archive/somecommit.tar.gz"; }'';
+
+      description = mdDoc ''
+        The path to the nixpkgs sources used to build the system. This is automatically set up to be
+        the store path of the nixpkgs flake used to build the system if using
+        `nixpkgs.lib.nixosSystem`, and is otherwise null by default.
+
+        This can also be optionally set if the NixOS system is not built with a flake but still uses
+        pinned sources: set this to the store path for the nixpkgs sources used to build the system,
+        as may be obtained by `builtins.fetchTarball`, for example.
+
+        Note: the name of the store path must be "source" due to
+        <https://github.com/NixOS/nix/issues/7075>.
+      '';
+    };
+
+    setNixPath = mkOption {
+      type = types.bool;
+
+      default = cfg.source != null;
+      defaultText = "config.nixpkgs.flake.source != null";
+
+      description = mdDoc ''
+        Whether to set {env}`NIX_PATH` to include `nixpkgs=flake:nixpkgs` such that `<nixpkgs>`
+        lookups receive the version of nixpkgs that the system was built with, in concert with
+        {option}`nixpkgs.flake.setFlakeRegistry`.
+
+        This is on by default for NixOS configurations built with flakes.
+
+        This makes {command}`nix-build '<nixpkgs>' -A hello` work out of the box on flake systems.
+
+        Note that this option makes the NixOS closure depend on the nixpkgs sources, which may add
+        undesired closure size if the system will not have any nix commands run on it.
+      '';
+    };
+
+    setFlakeRegistry = mkOption {
+      type = types.bool;
+
+      default = cfg.source != null;
+      defaultText = "config.nixpkgs.flake.source != null";
+
+      description = mdDoc ''
+        Whether to pin nixpkgs in the system-wide flake registry (`/etc/nix/registry.json`) to the
+        store path of the sources of nixpkgs used to build the NixOS system.
+
+        This is on by default for NixOS configurations built with flakes.
+
+        This option makes {command}`nix run nixpkgs#hello` reuse dependencies from the system, avoid
+        refetching nixpkgs, and have a consistent result every time.
+
+        Note that this option makes the NixOS closure depend on the nixpkgs sources, which may add
+        undesired closure size if the system will not have any nix commands run on it.
+      '';
+    };
+  };
+
+  config = mkIf (cfg.source != null) (mkMerge [
+    {
+      assertions = [
+        {
+          assertion = cfg.setNixPath -> cfg.setFlakeRegistry;
+          message = ''
+            Setting `nixpkgs.flake.setNixPath` requires that `nixpkgs.flake.setFlakeRegistry` also
+            be set, since it is implemented in terms of indirection through the flake registry.
+          '';
+        }
+      ];
+    }
+    (mkIf cfg.setFlakeRegistry {
+      nix.registry.nixpkgs.to = mkDefault {
+        type = "path";
+        path = cfg.source;
+      };
+    })
+    (mkIf cfg.setNixPath {
+      # N.B. This does not include nixos-config in NIX_PATH unlike modules/config/nix-channel.nix
+      # because we would need some kind of evil shim taking the *calling* flake's self path,
+      # perhaps, to ever make that work (in order to know where the Nix expr for the system came
+      # from and how to call it).
+      nix.nixPath = mkDefault ([ "nixpkgs=flake:nixpkgs" ]
+        ++ optional config.nix.channel.enable "/nix/var/nix/profiles/per-user/root/channels");
+    })
+  ]);
+}
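A hedged configuration sketch for the options this new module introduces, for systems that should not carry the nixpkgs sources in their closure, or for non-flake systems with pinned sources (the fetchTarball values are placeholders):

  { lib, ... }:
  {
    # Closure-size-constrained, non-interactive system: opt out of both mechanisms.
    nixpkgs.flake.setNixPath = false;
    nixpkgs.flake.setFlakeRegistry = false;

    # Or, on a non-flake system with pinned sources (the store path name must be "source"):
    # nixpkgs.flake.source = builtins.fetchTarball {
    #   name = "source";
    #   url = "https://github.com/nixos/nixpkgs/archive/somecommit.tar.gz";
    #   sha256 = lib.fakeHash;
    # };
  }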
diff --git a/nixpkgs/nixos/modules/module-list.nix b/nixpkgs/nixos/modules/module-list.nix
index a64efec046a4..5d82c6de77e2 100644
--- a/nixpkgs/nixos/modules/module-list.nix
+++ b/nixpkgs/nixos/modules/module-list.nix
@@ -133,6 +133,7 @@
   ./misc/meta.nix
   ./misc/nixops-autoluks.nix
   ./misc/nixpkgs.nix
+  ./misc/nixpkgs-flake.nix
   ./misc/passthru.nix
   ./misc/version.nix
   ./misc/wordlist.nix
@@ -547,6 +548,7 @@
   ./services/hardware/kanata.nix
   ./services/hardware/lcd.nix
   ./services/hardware/lirc.nix
+  ./services/hardware/nvidia-container-toolkit-cdi-generator
   ./services/hardware/nvidia-optimus.nix
   ./services/hardware/openrgb.nix
   ./services/hardware/pcscd.nix
@@ -841,6 +843,7 @@
   ./services/monitoring/riemann.nix
   ./services/monitoring/rustdesk-server.nix
   ./services/monitoring/scollector.nix
+  ./services/monitoring/scrutiny.nix
   ./services/monitoring/smartd.nix
   ./services/monitoring/snmpd.nix
   ./services/monitoring/statsd.nix
@@ -899,7 +902,6 @@
   ./services/networking/autossh.nix
   ./services/networking/avahi-daemon.nix
   ./services/networking/babeld.nix
-  ./services/networking/bee-clef.nix
   ./services/networking/bee.nix
   ./services/networking/biboumi.nix
   ./services/networking/bind.nix
@@ -1403,7 +1405,6 @@
   ./services/web-servers/unit/default.nix
   ./services/web-servers/uwsgi.nix
   ./services/web-servers/varnish/default.nix
-  ./services/web-servers/zope2.nix
   ./services/x11/clight.nix
   ./services/x11/colord.nix
   ./services/x11/desktop-managers/default.nix
diff --git a/nixpkgs/nixos/modules/programs/ccache.nix b/nixpkgs/nixos/modules/programs/ccache.nix
index 567c853e8c7d..7972b2ac4a56 100644
--- a/nixpkgs/nixos/modules/programs/ccache.nix
+++ b/nixpkgs/nixos/modules/programs/ccache.nix
@@ -1,35 +1,43 @@
 { config, pkgs, lib, ... }:
 
-with lib;
 let
   cfg = config.programs.ccache;
 in {
   options.programs.ccache = {
     # host configuration
-    enable = mkEnableOption (lib.mdDoc "CCache");
-    cacheDir = mkOption {
-      type = types.path;
+    enable = lib.mkEnableOption (lib.mdDoc "CCache");
+    cacheDir = lib.mkOption {
+      type = lib.types.path;
       description = lib.mdDoc "CCache directory";
       default = "/var/cache/ccache";
     };
     # target configuration
-    packageNames = mkOption {
-      type = types.listOf types.str;
+    packageNames = lib.mkOption {
+      type = lib.types.listOf lib.types.str;
       description = lib.mdDoc "Nix top-level packages to be compiled using CCache";
       default = [];
       example = [ "wxGTK32" "ffmpeg" "libav_all" ];
     };
+    owner = lib.mkOption {
+      type = lib.types.str;
+      default = "root";
+      description = lib.mdDoc "Owner of CCache directory";
+    };
+    group = lib.mkOption {
+      type = lib.types.str;
+      default = "nixbld";
+      description = lib.mdDoc "Group owner of CCache directory";
+    };
   };
 
-  config = mkMerge [
+  config = lib.mkMerge [
     # host configuration
-    (mkIf cfg.enable {
-      systemd.tmpfiles.rules = [ "d ${cfg.cacheDir} 0770 root nixbld -" ];
+    (lib.mkIf cfg.enable {
+      systemd.tmpfiles.rules = [ "d ${cfg.cacheDir} 0770 ${cfg.owner} ${cfg.group} -" ];
 
       # "nix-ccache --show-stats" and "nix-ccache --clear"
       security.wrappers.nix-ccache = {
-        owner = "root";
-        group = "nixbld";
+        inherit (cfg) owner group;
         setuid = false;
         setgid = true;
         source = pkgs.writeScript "nix-ccache.pl" ''
@@ -50,9 +58,9 @@ in {
     })
 
     # target configuration
-    (mkIf (cfg.packageNames != []) {
+    (lib.mkIf (cfg.packageNames != []) {
       nixpkgs.overlays = [
-        (self: super: genAttrs cfg.packageNames (pn: super.${pn}.override { stdenv = builtins.trace "with ccache: ${pn}" self.ccacheStdenv; }))
+        (self: super: lib.genAttrs cfg.packageNames (pn: super.${pn}.override { stdenv = builtins.trace "with ccache: ${pn}" self.ccacheStdenv; }))
 
         (self: super: {
           ccacheWrapper = super.ccacheWrapper.override {
@@ -65,7 +73,7 @@ in {
                 echo "Directory '$CCACHE_DIR' does not exist"
                 echo "Please create it with:"
                 echo "  sudo mkdir -m0770 '$CCACHE_DIR'"
-                echo "  sudo chown root:nixbld '$CCACHE_DIR'"
+                echo "  sudo chown ${cfg.owner}:${cfg.group} '$CCACHE_DIR'"
                 echo "====="
                 exit 1
               fi
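A short usage sketch combining the pre-existing ccache options with the `owner`/`group` options added above (the package name is illustrative):

  {
    programs.ccache = {
      enable = true;
      cacheDir = "/var/cache/ccache";
      owner = "root";               # new option, default "root"
      group = "nixbld";             # new option, default "nixbld"
      packageNames = [ "ffmpeg" ];  # built with ccacheStdenv via the overlay above
    };
  }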
diff --git a/nixpkgs/nixos/modules/programs/steam.nix b/nixpkgs/nixos/modules/programs/steam.nix
index 29c449c16946..c7f1e622f7ba 100644
--- a/nixpkgs/nixos/modules/programs/steam.nix
+++ b/nixpkgs/nixos/modules/programs/steam.nix
@@ -82,6 +82,14 @@ in {
       '';
     };
 
+    localNetworkGameTransfers.openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open ports in the firewall for Steam Local Network Game Transfers.
+      '';
+    };
+
     gamescopeSession = mkOption {
       description = mdDoc "Run a GameScope driven Steam session from your display-manager";
       default = {};
@@ -139,15 +147,23 @@ in {
     ] ++ lib.optional cfg.gamescopeSession.enable steam-gamescope;
 
     networking.firewall = lib.mkMerge [
+      (mkIf (cfg.remotePlay.openFirewall || cfg.localNetworkGameTransfers.openFirewall) {
+        allowedUDPPorts = [ 27036 ]; # Peer discovery
+      })
+
       (mkIf cfg.remotePlay.openFirewall {
         allowedTCPPorts = [ 27036 ];
-        allowedUDPPortRanges = [ { from = 27031; to = 27036; } ];
+        allowedUDPPortRanges = [ { from = 27031; to = 27035; } ];
       })
 
       (mkIf cfg.dedicatedServer.openFirewall {
         allowedTCPPorts = [ 27015 ]; # SRCDS Rcon port
         allowedUDPPorts = [ 27015 ]; # Gameplay traffic
       })
+
+      (mkIf cfg.localNetworkGameTransfers.openFirewall {
+        allowedTCPPorts = [ 27040 ]; # Data transfers
+      })
     ];
   };
 
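A hedged sketch of the firewall-related Steam options after this change (`enable` and `remotePlay.openFirewall` already existed; `localNetworkGameTransfers.openFirewall` is the addition above):

  {
    programs.steam = {
      enable = true;
      remotePlay.openFirewall = true;                 # TCP 27036, UDP 27031-27035, UDP 27036
      localNetworkGameTransfers.openFirewall = true;  # TCP 27040, UDP 27036 for peer discovery
    };
  }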
diff --git a/nixpkgs/nixos/modules/security/ca.nix b/nixpkgs/nixos/modules/security/ca.nix
index 3cd56bff04d1..ae188ea709dd 100644
--- a/nixpkgs/nixos/modules/security/ca.nix
+++ b/nixpkgs/nixos/modules/security/ca.nix
@@ -11,7 +11,8 @@ let
     extraCertificateFiles = cfg.certificateFiles;
     extraCertificateStrings = cfg.certificates;
   };
-  caBundle = "${cacertPackage}/etc/ssl/certs/ca-bundle.crt";
+  caBundleName = if cfg.useCompatibleBundle then "ca-no-trust-rules-bundle.crt" else "ca-bundle.crt";
+  caBundle = "${cacertPackage}/etc/ssl/certs/${caBundleName}";
 
 in
 
@@ -23,6 +24,17 @@ in
       internal = true;
     };
 
+    security.pki.useCompatibleBundle = mkEnableOption ''usage of a compatibility bundle.
+
+      Such a bundle consists exclusively of `BEGIN CERTIFICATE` entries and no
+      `BEGIN TRUSTED CERTIFICATE` entries, which use an OpenSSL-specific PEM format.
+
+      That format is known to be incompatible with certain software stacks.
+
+      Nevertheless, enabling this will strip all additional trust rules provided by the
+      certificates themselves, which can have security consequences depending on your use case.
+    '';
+
     security.pki.certificateFiles = mkOption {
       type = types.listOf types.path;
       default = [];
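A minimal sketch of opting into the compatibility bundle introduced above:

  {
    # Use the bundle without OpenSSL-specific "TRUSTED CERTIFICATE" entries,
    # at the cost of dropping the trust rules embedded in the certificates.
    security.pki.useCompatibleBundle = true;
  }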
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix
index fd2dce7ee6a2..313dbe234018 100644
--- a/nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix
@@ -33,6 +33,41 @@ let
 
   kubeconfig = top.lib.mkKubeConfig "kubelet" cfg.kubeconfig;
 
+  # Flag based settings are deprecated, use the `--config` flag with a
+  # `KubeletConfiguration` struct.
+  # https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/
+  #
+  # NOTE: registerWithTaints requires a []core/v1.Taint, therefore requires
+  # additional work to be put in config format.
+  #
+  kubeletConfig = pkgs.writeText "kubelet-config" (builtins.toJSON ({
+    apiVersion = "kubelet.config.k8s.io/v1beta1";
+    kind = "KubeletConfiguration";
+    address = cfg.address;
+    port = cfg.port;
+    authentication = {
+      x509 = lib.optionalAttrs (cfg.clientCaFile != null) { clientCAFile = cfg.clientCaFile; };
+      webhook = {
+        enabled = true;
+        cacheTTL = "10s";
+      };
+    };
+    authorization = {
+      mode = "Webhook";
+    };
+    cgroupDriver = "systemd";
+    hairpinMode = "hairpin-veth";
+    registerNode = cfg.registerNode;
+    containerRuntimeEndpoint = cfg.containerRuntimeEndpoint;
+    healthzPort = cfg.healthz.port;
+    healthzBindAddress = cfg.healthz.bind;
+  } // lib.optionalAttrs (cfg.tlsCertFile != null)  { tlsCertFile = cfg.tlsCertFile; }
+    // lib.optionalAttrs (cfg.tlsKeyFile != null)   { tlsPrivateKeyFile = cfg.tlsKeyFile; }
+    // lib.optionalAttrs (cfg.clusterDomain != "")  { clusterDomain = cfg.clusterDomain; }
+    // lib.optionalAttrs (cfg.clusterDns != "")     { clusterDNS = [ cfg.clusterDns ] ; }
+    // lib.optionalAttrs (cfg.featureGates != [])   { featureGates = cfg.featureGates; }
+  ));
+
   manifestPath = "kubernetes/manifests";
 
   taintOptions = with lib.types; { name, ... }: {
@@ -294,21 +329,7 @@ in
           Restart = "on-failure";
           RestartSec = "1000ms";
           ExecStart = ''${top.package}/bin/kubelet \
-            --address=${cfg.address} \
-            --authentication-token-webhook \
-            --authentication-token-webhook-cache-ttl="10s" \
-            --authorization-mode=Webhook \
-            ${optionalString (cfg.clientCaFile != null)
-              "--client-ca-file=${cfg.clientCaFile}"} \
-            ${optionalString (cfg.clusterDns != "")
-              "--cluster-dns=${cfg.clusterDns}"} \
-            ${optionalString (cfg.clusterDomain != "")
-              "--cluster-domain=${cfg.clusterDomain}"} \
-            ${optionalString (cfg.featureGates != [])
-              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-            --hairpin-mode=hairpin-veth \
-            --healthz-bind-address=${cfg.healthz.bind} \
-            --healthz-port=${toString cfg.healthz.port} \
+            --config=${kubeletConfig} \
             --hostname-override=${cfg.hostname} \
             --kubeconfig=${kubeconfig} \
             ${optionalString (cfg.nodeIp != null)
@@ -316,18 +337,10 @@ in
             --pod-infra-container-image=pause \
             ${optionalString (cfg.manifests != {})
               "--pod-manifest-path=/etc/${manifestPath}"} \
-            --port=${toString cfg.port} \
-            --register-node=${boolToString cfg.registerNode} \
             ${optionalString (taints != "")
               "--register-with-taints=${taints}"} \
             --root-dir=${top.dataDir} \
-            ${optionalString (cfg.tlsCertFile != null)
-              "--tls-cert-file=${cfg.tlsCertFile}"} \
-            ${optionalString (cfg.tlsKeyFile != null)
-              "--tls-private-key-file=${cfg.tlsKeyFile}"} \
             ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
-            --container-runtime-endpoint=${cfg.containerRuntimeEndpoint} \
-            --cgroup-driver=systemd \
             ${cfg.extraOpts}
           '';
           WorkingDirectory = top.dataDir;
diff --git a/nixpkgs/nixos/modules/services/development/lorri.nix b/nixpkgs/nixos/modules/services/development/lorri.nix
index 74f56f5890fc..df3d814d7444 100644
--- a/nixpkgs/nixos/modules/services/development/lorri.nix
+++ b/nixpkgs/nixos/modules/services/development/lorri.nix
@@ -44,8 +44,7 @@ in {
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/lorri daemon";
         PrivateTmp = true;
-        ProtectSystem = "strict";
-        ProtectHome = "read-only";
+        ProtectSystem = "full";
         Restart = "on-failure";
       };
     };
diff --git a/nixpkgs/nixos/modules/services/games/archisteamfarm.nix b/nixpkgs/nixos/modules/services/games/archisteamfarm.nix
index c00ae8116b39..4bb7234f430f 100644
--- a/nixpkgs/nixos/modules/services/games/archisteamfarm.nix
+++ b/nixpkgs/nixos/modules/services/games/archisteamfarm.nix
@@ -270,6 +270,6 @@ in
 
   meta = {
     buildDocsInSandbox = false;
-    maintainers = with lib.maintainers; [ lom SuperSandro2000 ];
+    maintainers = with lib.maintainers; [ SuperSandro2000 ];
   };
 }
diff --git a/nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix b/nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix
index f472b5774cbf..746154e7aa17 100644
--- a/nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix
+++ b/nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix
@@ -60,6 +60,10 @@ in
       systemd.services.hddfancontrol = {
         wantedBy = [ "multi-user.target" ];
         environment.HDDFANCONTROL_ARGS = lib.escapeShellArgs args;
+        serviceConfig = {
+          # Hardening
+          PrivateNetwork = true;
+        };
       };
     }
   );
diff --git a/nixpkgs/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix b/nixpkgs/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix
new file mode 100644
index 000000000000..a90d234f65c0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix
@@ -0,0 +1,39 @@
+{ config, lib, pkgs }: let
+  mountOptions = { options = ["ro" "nosuid" "nodev" "bind"]; };
+  mounts = [
+    { hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-cuda-mps-control";
+      containerPath = "/usr/bin/nvidia-cuda-mps-control"; }
+    { hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-cuda-mps-server";
+      containerPath = "/usr/bin/nvidia-cuda-mps-server"; }
+    { hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-debugdump";
+      containerPath = "/usr/bin/nvidia-debugdump"; }
+    { hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-powerd";
+      containerPath = "/usr/bin/nvidia-powerd"; }
+    { hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-smi";
+      containerPath = "/usr/bin/nvidia-smi"; }
+    { hostPath = "${pkgs.nvidia-container-toolkit}/bin/nvidia-ctk";
+      containerPath = "/usr/bin/nvidia-ctk"; }
+    { hostPath = "${pkgs.glibc}/lib";
+      containerPath = "${pkgs.glibc}/lib"; }
+    { hostPath = "${pkgs.glibc}/lib64";
+      containerPath = "${pkgs.glibc}/lib64"; }
+  ];
+  jqAddMountExpression = ".containerEdits.mounts[.containerEdits.mounts | length] |= . +";
+  mountsToJq = lib.concatMap
+    (mount:
+      ["${pkgs.jq}/bin/jq '${jqAddMountExpression} ${builtins.toJSON (mount // mountOptions)}'"])
+    mounts;
+in ''
+#! ${pkgs.runtimeShell}
+
+function cdiGenerate {
+  ${pkgs.nvidia-container-toolkit}/bin/nvidia-ctk cdi generate \
+    --format json \
+    --ldconfig-path ${pkgs.glibc.bin}/bin/ldconfig \
+    --library-search-path ${config.hardware.nvidia.package}/lib \
+    --nvidia-ctk-path ${pkgs.nvidia-container-toolkit}/bin/nvidia-ctk
+}
+
+cdiGenerate | \
+  ${lib.concatStringsSep " | " mountsToJq} > $RUNTIME_DIRECTORY/nvidia-container-toolkit.json
+''
diff --git a/nixpkgs/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix b/nixpkgs/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix
new file mode 100644
index 000000000000..3c96e9c41be5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+
+{
+
+  options = {
+
+    hardware.nvidia-container-toolkit-cdi-generator.enable = lib.mkOption {
+      default = false;
+      internal = true;
+      visible = false;
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Enable dynamic CDI configuration for NVidia devices by running
+        nvidia-container-toolkit on boot.
+      '';
+    };
+
+  };
+
+  config = {
+
+    systemd.services.nvidia-container-toolkit-cdi-generator = lib.mkIf config.hardware.nvidia-container-toolkit-cdi-generator.enable {
+      description = "Container Device Interface (CDI) for Nvidia generator";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-udev-settle.service" ];
+      serviceConfig = {
+        RuntimeDirectory = "cdi";
+        RemainAfterExit = true;
+        ExecStart = let
+          script = (pkgs.writeScriptBin "nvidia-cdi-generator"
+            (import ./cdi-generate.nix { inherit config lib pkgs; })); in (lib.getExe script);
+        Type = "oneshot";
+      };
+    };
+
+  };
+
+}
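A hedged sketch of how this generator is consumed through the container options mentioned in the release notes above; the CDI device name shown is the conventional NVIDIA one and is an assumption here:

  {
    # Generates /run/cdi/nvidia-container-toolkit.json at boot via the service above.
    virtualisation.containers.cdi.dynamic.nvidia.enable = true;
  }

Containers can then request the GPU with the CDI `--device` syntax, e.g. `podman run --device nvidia.com/gpu=all ...` (device name assumed).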
diff --git a/nixpkgs/nixos/modules/services/hardware/pcscd.nix b/nixpkgs/nixos/modules/services/hardware/pcscd.nix
index b5963e1d29a3..77c2d9b53f03 100644
--- a/nixpkgs/nixos/modules/services/hardware/pcscd.nix
+++ b/nixpkgs/nixos/modules/services/hardware/pcscd.nix
@@ -3,6 +3,7 @@
 with lib;
 
 let
+  cfg = config.services.pcscd;
   cfgFile = pkgs.writeText "reader.conf" config.services.pcscd.readerConfig;
 
   package = if config.security.polkit.enable
@@ -41,6 +42,12 @@ in
         See {manpage}`reader.conf(5)` for valid options.
       '';
     };
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      description = lib.mdDoc "Extra command line arguments to be passed to the PCSC daemon.";
+    };
   };
 
   config = mkIf config.services.pcscd.enable {
@@ -64,7 +71,7 @@ in
       # around it, we force the path to the cfgFile.
       #
       # https://github.com/NixOS/nixpkgs/issues/121088
-      serviceConfig.ExecStart = [ "" "${package}/bin/pcscd -f -x -c ${cfgFile}" ];
+      serviceConfig.ExecStart = [ "" "${lib.getExe package} -f -x -c ${cfgFile} ${lib.escapeShellArgs cfg.extraArgs}" ];
     };
   };
 }
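A small usage sketch for the new `extraArgs` option; the flag shown is an assumption about pcscd's CLI, included only to illustrate that the list is shell-escaped and appended to ExecStart:

  {
    services.pcscd = {
      enable = true;
      extraArgs = [ "--debug" ];  # assumed pcscd flag, for illustration
    };
  }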
diff --git a/nixpkgs/nixos/modules/services/hardware/thinkfan.nix b/nixpkgs/nixos/modules/services/hardware/thinkfan.nix
index cca35f492b8e..b62fb5e9f8c9 100644
--- a/nixpkgs/nixos/modules/services/hardware/thinkfan.nix
+++ b/nixpkgs/nixos/modules/services/hardware/thinkfan.nix
@@ -217,8 +217,13 @@ in {
 
     systemd.services = {
       thinkfan.environment.THINKFAN_ARGS = escapeShellArgs ([ "-c" configFile ] ++ cfg.extraArgs);
-      thinkfan.serviceConfig.Restart = "on-failure";
-      thinkfan.serviceConfig.RestartSec = "30s";
+      thinkfan.serviceConfig = {
+        Restart = "on-failure";
+        RestartSec = "30s";
+
+        # Hardening
+        PrivateNetwork = true;
+      };
 
       # must be added manually, see issue #81138
       thinkfan.wantedBy = [ "multi-user.target" ];
diff --git a/nixpkgs/nixos/modules/services/misc/atuin.nix b/nixpkgs/nixos/modules/services/misc/atuin.nix
index 2d6ffc510ce5..7e89929884d6 100644
--- a/nixpkgs/nixos/modules/services/misc/atuin.nix
+++ b/nixpkgs/nixos/modules/services/misc/atuin.nix
@@ -8,6 +8,8 @@ in
     services.atuin = {
       enable = lib.mkEnableOption (mdDoc "Atuin server for shell history sync");
 
+      package = lib.mkPackageOption pkgs "atuin" { };
+
       openRegistration = mkOption {
         type = types.bool;
         default = false;
@@ -52,10 +54,13 @@ in
         };
 
         uri = mkOption {
-          type = types.str;
+          type = types.nullOr types.str;
           default = "postgresql:///atuin?host=/run/postgresql";
           example = "postgresql://atuin@localhost:5432/atuin";
-          description = mdDoc "URI to the database";
+          description = mdDoc ''
+            URI to the database.
+            Can be set to null, in which case ATUIN_DB_URI should be provided through an EnvironmentFile.
+          '';
         };
       };
     };
@@ -85,7 +90,7 @@ in
       wantedBy = [ "multi-user.target" ];
 
       serviceConfig = {
-        ExecStart = "${pkgs.atuin}/bin/atuin server start";
+        ExecStart = "${lib.getExe cfg.package} server start";
         RuntimeDirectory = "atuin";
         RuntimeDirectoryMode = "0700";
         DynamicUser = true;
@@ -132,9 +137,10 @@ in
         ATUIN_PORT = toString cfg.port;
         ATUIN_MAX_HISTORY_LENGTH = toString cfg.maxHistoryLength;
         ATUIN_OPEN_REGISTRATION = lib.boolToString cfg.openRegistration;
-        ATUIN_DB_URI = cfg.database.uri;
         ATUIN_PATH = cfg.path;
         ATUIN_CONFIG_DIR = "/run/atuin"; # required to start, but not used as configuration is via environment variables
+      } // lib.optionalAttrs (cfg.database.uri != null) {
+        ATUIN_DB_URI = cfg.database.uri;
       };
     };
 
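A hedged sketch of the new `database.uri = null` escape hatch, supplying `ATUIN_DB_URI` outside the Nix store (the environment-file path is illustrative):

  {
    services.atuin = {
      enable = true;
      database.uri = null;  # keep the connection string out of the world-readable store
    };
    # Provide ATUIN_DB_URI via a root-only environment file instead:
    systemd.services.atuin.serviceConfig.EnvironmentFile = "/run/secrets/atuin.env";
  }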
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix
index 36409caccf2e..2a8b7fc0818d 100644
--- a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix
@@ -1,41 +1,54 @@
-{ config, lib, pkgs, options }:
+{ config
+, lib
+, pkgs
+, options
+}:
 
-with lib;
+let
+  inherit (lib)
+    escapeShellArgs
+    mkOption
+    optionals
+    types
+  ;
 
-let cfg = config.services.prometheus.exporters.fastly;
+  cfg = config.services.prometheus.exporters.fastly;
 in
 {
   port = 9118;
-  extraOpts = {
-    debug = mkEnableOption (lib.mdDoc "Debug logging mode for fastly-exporter");
-
+  extraOpts = with types; {
     configFile = mkOption {
-      type = types.nullOr types.path;
+      type = nullOr path;
       default = null;
-      description = lib.mdDoc ''
+      example = "./fastly-exporter-config.txt";
+      description = ''
         Path to a fastly-exporter configuration file.
         Example one can be generated with `fastly-exporter --config-file-example`.
       '';
-      example = "./fastly-exporter-config.txt";
     };
 
     tokenPath = mkOption {
-      type = types.nullOr types.path;
-      apply = final: if final == null then null else toString final;
-      description = lib.mdDoc ''
+      type = path;
+      description = ''
         A run-time path to the token file, which is supposed to be provisioned
         outside of Nix store.
       '';
     };
   };
   serviceOpts = {
-    script = ''
-      ${optionalString (cfg.tokenPath != null)
-      "export FASTLY_API_TOKEN=$(cat ${toString cfg.tokenPath})"}
-      ${pkgs.prometheus-fastly-exporter}/bin/fastly-exporter \
-        -listen http://${cfg.listenAddress}:${toString cfg.port}
-        ${optionalString cfg.debug "-debug true"} \
-        ${optionalString (cfg.configFile != null) "-config-file ${cfg.configFile}"}
+    serviceConfig = {
+      LoadCredential = "fastly-api-token:${cfg.tokenPath}";
+    };
+    script = let
+      call = escapeShellArgs ([
+        "${pkgs.prometheus-fastly-exporter}/bin/fastly-exporter"
+        "-listen" "${cfg.listenAddress}:${toString cfg.port}"
+      ] ++ optionals (cfg.configFile != null) [
+        "--config-file" cfg.configFile
+      ] ++ cfg.extraFlags);
+    in ''
+      export FASTLY_API_TOKEN="$(cat $CREDENTIALS_DIRECTORY/fastly-api-token)"
+      ${call}
     '';
   };
 }
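A usage sketch for the reworked exporter, with the API token loaded through systemd credentials as implemented above (`enable` and `listenAddress` are the standard exporter options; the token path is illustrative):

  {
    services.prometheus.exporters.fastly = {
      enable = true;
      listenAddress = "127.0.0.1";
      tokenPath = "/run/secrets/fastly-api-token";  # exposed to the unit via LoadCredential
    };
  }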
diff --git a/nixpkgs/nixos/modules/services/monitoring/scrutiny.nix b/nixpkgs/nixos/modules/services/monitoring/scrutiny.nix
new file mode 100644
index 000000000000..454668a9a128
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/scrutiny.nix
@@ -0,0 +1,221 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.scrutiny;
+  # Define the settings format used for this program
+  settingsFormat = pkgs.formats.yaml { };
+in
+{
+  options = {
+    services.scrutiny = {
+      enable = lib.mkEnableOption "Enables the scrutiny web application.";
+
+      package = lib.mkPackageOptionMD pkgs "scrutiny" { };
+
+      openFirewall = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = "Open the default ports in the firewall for Scrutiny.";
+      };
+
+      influxdb.enable = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enables InfluxDB on the host system using the `services.influxdb2` NixOS module
+          with default options.
+
+          If you already have InfluxDB configured, or wish to connect to an external InfluxDB
+          instance, disable this option.
+        '';
+      };
+
+      settings = lib.mkOption {
+        description = lib.mdDoc ''
+          Scrutiny settings to be rendered into the configuration file.
+
+          See https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml.
+        '';
+        default = { };
+        type = lib.types.submodule {
+          freeformType = settingsFormat.type;
+
+          options.web.listen.port = lib.mkOption {
+            type = lib.types.port;
+            default = 8080;
+            description = lib.mdDoc "Port for web application to listen on.";
+          };
+
+          options.web.listen.host = lib.mkOption {
+            type = lib.types.str;
+            default = "0.0.0.0";
+            description = lib.mdDoc "Interface address for web application to bind to.";
+          };
+
+          options.web.listen.basepath = lib.mkOption {
+            type = lib.types.str;
+            default = "";
+            example = "/scrutiny";
+            description = lib.mdDoc ''
+              If Scrutiny will be behind a path prefixed reverse proxy, you can override this
+              value to serve Scrutiny on a subpath.
+            '';
+          };
+
+          options.log.level = lib.mkOption {
+            type = lib.types.enum [ "INFO" "DEBUG" ];
+            default = "INFO";
+            description = lib.mdDoc "Log level for Scrutiny.";
+          };
+
+          options.web.influxdb.scheme = lib.mkOption {
+            type = lib.types.str;
+            default = "http";
+            description = lib.mdDoc "URL scheme to use when connecting to InfluxDB.";
+          };
+
+          options.web.influxdb.host = lib.mkOption {
+            type = lib.types.str;
+            default = "0.0.0.0";
+            description = lib.mdDoc "IP or hostname of the InfluxDB instance.";
+          };
+
+          options.web.influxdb.port = lib.mkOption {
+            type = lib.types.port;
+            default = 8086;
+            description = lib.mdDoc "The port of the InfluxDB instance.";
+          };
+
+          options.web.influxdb.tls.insecure_skip_verify = lib.mkOption {
+            type = lib.types.bool;
+            default = false;
+            description = lib.mdDoc "Skip TLS verification when connecting to InfluxDB.";
+          };
+
+          options.web.influxdb.token = lib.mkOption {
+            type = lib.types.nullOr lib.types.str;
+            default = null;
+            description = lib.mdDoc "Authentication token for connecting to InfluxDB.";
+          };
+
+          options.web.influxdb.org = lib.mkOption {
+            type = lib.types.nullOr lib.types.str;
+            default = null;
+            description = lib.mdDoc "InfluxDB organisation under which to store data.";
+          };
+
+          options.web.influxdb.bucket = lib.mkOption {
+            type = lib.types.nullOr lib.types.str;
+            default = null;
+            description = lib.mdDoc "InfluxDB bucket in which to store data.";
+          };
+        };
+      };
+
+      collector = {
+        enable = lib.mkEnableOption "Enables the scrutiny metrics collector.";
+
+        package = lib.mkPackageOptionMD pkgs "scrutiny-collector" { };
+
+        schedule = lib.mkOption {
+          type = lib.types.str;
+          default = "*:0/15";
+          description = lib.mdDoc ''
+            How often to run the collector in systemd calendar format.
+          '';
+        };
+
+        settings = lib.mkOption {
+          description = lib.mdDoc ''
+            Collector settings to be rendered into the collector configuration file.
+
+            See https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml.
+          '';
+          default = { };
+          type = lib.types.submodule {
+            freeformType = settingsFormat.type;
+
+            options.host.id = lib.mkOption {
+              type = lib.types.nullOr lib.types.str;
+              default = null;
+              description = lib.mdDoc "Host ID for identifying/labelling groups of disks";
+            };
+
+            options.api.endpoint = lib.mkOption {
+              type = lib.types.str;
+              default = "http://localhost:8080";
+              description = lib.mdDoc "Scrutiny app API endpoint for sending metrics to.";
+            };
+
+            options.log.level = lib.mkOption {
+              type = lib.types.enum [ "INFO" "DEBUG" ];
+              default = "INFO";
+              description = lib.mdDoc "Log level for Scrutiny collector.";
+            };
+          };
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf (cfg.enable || cfg.collector.enable) {
+    services.influxdb2.enable = cfg.influxdb.enable;
+
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.settings.web.listen.port ];
+    };
+
+    services.smartd = lib.mkIf cfg.collector.enable {
+      enable = true;
+      extraOptions = [
+        "-A /var/log/smartd/"
+        "--interval=600"
+      ];
+    };
+
+    systemd = {
+      services = {
+        scrutiny = lib.mkIf cfg.enable {
+          description = "Hard Drive S.M.A.R.T Monitoring, Historical Trends & Real World Failure Thresholds";
+          wantedBy = [ "multi-user.target" ];
+          after = [ "network.target" ];
+          environment = {
+            SCRUTINY_VERSION = "1";
+            SCRUTINY_WEB_DATABASE_LOCATION = "/var/lib/scrutiny/scrutiny.db";
+            SCRUTINY_WEB_SRC_FRONTEND_PATH = "${cfg.package}/share/scrutiny";
+          };
+          serviceConfig = {
+            DynamicUser = true;
+            ExecStart = "${lib.getExe cfg.package} start --config ${settingsFormat.generate "scrutiny.yaml" cfg.settings}";
+            Restart = "always";
+            StateDirectory = "scrutiny";
+            StateDirectoryMode = "0750";
+          };
+        };
+
+        scrutiny-collector = lib.mkIf cfg.collector.enable {
+          description = "Scrutiny Collector Service";
+          environment = {
+            COLLECTOR_VERSION = "1";
+            COLLECTOR_API_ENDPOINT = cfg.collector.settings.api.endpoint;
+          };
+          serviceConfig = {
+            Type = "oneshot";
+            ExecStart = "${lib.getExe cfg.collector.package} run --config ${settingsFormat.generate "scrutiny-collector.yaml" cfg.collector.settings}";
+          };
+        };
+      };
+
+      timers = lib.mkIf cfg.collector.enable {
+        scrutiny-collector = {
+          timerConfig = {
+            OnCalendar = cfg.collector.schedule;
+            Persistent = true;
+            Unit = "scrutiny-collector.service";
+          };
+        };
+      };
+    };
+  };
+
+  meta.maintainers = [ lib.maintainers.jnsgruk ];
+}
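A minimal sketch enabling both the web application and the collector from the new module above; by default this uses the co-located InfluxDB (`influxdb.enable = true`) and the `*:0/15` collection schedule:

  {
    services.scrutiny = {
      enable = true;
      openFirewall = true;      # opens settings.web.listen.port (default 8080)
      collector.enable = true;  # runs scrutiny-collector on a systemd timer
    };
  }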
diff --git a/nixpkgs/nixos/modules/services/networking/bee-clef.nix b/nixpkgs/nixos/modules/services/networking/bee-clef.nix
deleted file mode 100644
index 75e76f019a71..000000000000
--- a/nixpkgs/nixos/modules/services/networking/bee-clef.nix
+++ /dev/null
@@ -1,107 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-# NOTE for now nothing is installed into /etc/bee-clef/. the config files are used as read-only from the nix store.
-
-with lib;
-let
-  cfg = config.services.bee-clef;
-in {
-  meta = {
-    maintainers = with maintainers; [ attila-lendvai ];
-  };
-
-  ### interface
-
-  options = {
-    services.bee-clef = {
-      enable = mkEnableOption (lib.mdDoc "clef external signer instance for Ethereum Swarm Bee");
-
-      dataDir = mkOption {
-        type = types.nullOr types.str;
-        default = "/var/lib/bee-clef";
-        description = lib.mdDoc ''
-          Data dir for bee-clef. Beware that some helper scripts may not work when changed!
-          The service itself should work fine, though.
-        '';
-      };
-
-      passwordFile = mkOption {
-        type = types.nullOr types.str;
-        default = "/var/lib/bee-clef/password";
-        description = lib.mdDoc "Password file for bee-clef.";
-      };
-
-      user = mkOption {
-        type = types.str;
-        default = "bee-clef";
-        description = lib.mdDoc ''
-          User the bee-clef daemon should execute under.
-        '';
-      };
-
-      group = mkOption {
-        type = types.str;
-        default = "bee-clef";
-        description = lib.mdDoc ''
-          Group the bee-clef daemon should execute under.
-        '';
-      };
-    };
-  };
-
-  ### implementation
-
-  config = mkIf cfg.enable {
-    # if we ever want to have rules.js under /etc/bee-clef/
-    # environment.etc."bee-clef/rules.js".source = ${pkgs.bee-clef}/rules.js
-
-    systemd.packages = [ pkgs.bee-clef ]; # include the upstream bee-clef.service file
-
-    systemd.tmpfiles.rules = [
-        "d '${cfg.dataDir}/'         0750 ${cfg.user} ${cfg.group}"
-        "d '${cfg.dataDir}/keystore' 0700 ${cfg.user} ${cfg.group}"
-      ];
-
-    systemd.services.bee-clef = {
-      path = [
-        # these are needed for the ensure-clef-account script
-        pkgs.coreutils
-        pkgs.gnused
-        pkgs.gawk
-      ];
-
-      wantedBy = [ "bee.service" "multi-user.target" ];
-
-      serviceConfig = {
-        User = cfg.user;
-        Group = cfg.group;
-        ExecStartPre = ''${pkgs.bee-clef}/share/bee-clef/ensure-clef-account "${cfg.dataDir}" "${pkgs.bee-clef}/share/bee-clef/"'';
-        ExecStart = [
-          "" # this hides/overrides what's in the original entry
-          "${pkgs.bee-clef}/share/bee-clef/bee-clef-service start"
-        ];
-        ExecStop = [
-          "" # this hides/overrides what's in the original entry
-          "${pkgs.bee-clef}/share/bee-clef/bee-clef-service stop"
-        ];
-        Environment = [
-          "CONFIGDIR=${cfg.dataDir}"
-          "PASSWORD_FILE=${cfg.passwordFile}"
-        ];
-      };
-    };
-
-    users.users = optionalAttrs (cfg.user == "bee-clef") {
-      bee-clef = {
-        group = cfg.group;
-        home = cfg.dataDir;
-        isSystemUser = true;
-        description = "Daemon user for the bee-clef service";
-      };
-    };
-
-    users.groups = optionalAttrs (cfg.group == "bee-clef") {
-      bee-clef = {};
-    };
-  };
-}
diff --git a/nixpkgs/nixos/modules/services/networking/bee.nix b/nixpkgs/nixos/modules/services/networking/bee.nix
index 962cfd30c3fe..a4d20494bf6b 100644
--- a/nixpkgs/nixos/modules/services/networking/bee.nix
+++ b/nixpkgs/nixos/modules/services/networking/bee.nix
@@ -8,7 +8,7 @@ let
 in {
   meta = {
     # doc = ./bee.xml;
-    maintainers = with maintainers; [ attila-lendvai ];
+    maintainers = with maintainers; [ ];
   };
 
   ### interface
@@ -73,13 +73,10 @@ in {
       }
     ];
 
-    warnings = optional (! config.services.bee-clef.enable) "The bee service requires an external signer. Consider setting `config.services.bee-clef.enable` = true";
-
     services.bee.settings = {
       data-dir             = lib.mkDefault "/var/lib/bee";
       password-file        = lib.mkDefault "/var/lib/bee/password";
       clef-signer-enable   = lib.mkDefault true;
-      clef-signer-endpoint = lib.mkDefault "/var/lib/bee-clef/clef.ipc";
       swap-endpoint        = lib.mkDefault "https://rpc.slock.it/goerli";
     };
 
@@ -90,9 +87,6 @@ in {
     ];
 
     systemd.services.bee = {
-      requires = optional config.services.bee-clef.enable
-        "bee-clef.service";
-
       wantedBy = [ "multi-user.target" ];
 
       serviceConfig = {
@@ -120,7 +114,6 @@ Bee has SWAP enabled by default and it needs ethereum endpoint to operate.
 It is recommended to use external signer with bee.
 Check documentation for more info:
 - SWAP https://docs.ethswarm.org/docs/installation/manual#swap-bandwidth-incentives
-- External signer https://docs.ethswarm.org/docs/installation/bee-clef
 
 After you finish configuration run 'sudo bee-get-addr'."
         fi
@@ -133,8 +126,6 @@ After you finish configuration run 'sudo bee-get-addr'."
         home = cfg.settings.data-dir;
         isSystemUser = true;
         description = "Daemon user for Ethereum Swarm Bee";
-        extraGroups = optional config.services.bee-clef.enable
-          config.services.bee-clef.group;
       };
     };
 
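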
diff --git a/nixpkgs/nixos/modules/services/networking/sabnzbd.nix b/nixpkgs/nixos/modules/services/networking/sabnzbd.nix
index cff2622b38e9..2f0d17ad3d17 100644
--- a/nixpkgs/nixos/modules/services/networking/sabnzbd.nix
+++ b/nixpkgs/nixos/modules/services/networking/sabnzbd.nix
@@ -36,6 +36,14 @@ in
         default = "sabnzbd";
         description = lib.mdDoc "Group to run the service as";
       };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for the sabnzbd web interface
+        '';
+      };
     };
   };
 
@@ -43,17 +51,16 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
-
-    users.users.sabnzbd = {
-          uid = config.ids.uids.sabnzbd;
-          group = "sabnzbd";
-          description = "sabnzbd user";
-          home = "/var/lib/sabnzbd/";
-          createHome = true;
+    users.users = mkIf (cfg.user == "sabnzbd") {
+      sabnzbd = {
+        uid = config.ids.uids.sabnzbd;
+        group = cfg.group;
+        description = "sabnzbd user";
+      };
     };
 
-    users.groups.sabnzbd = {
-      gid = config.ids.gids.sabnzbd;
+    users.groups = mkIf (cfg.group == "sabnzbd") {
+      sabnzbd.gid = config.ids.gids.sabnzbd;
     };
 
     systemd.services.sabnzbd = {
@@ -63,10 +70,15 @@ in
         serviceConfig = {
           Type = "forking";
           GuessMainPID = "no";
-          User = "${cfg.user}";
-          Group = "${cfg.group}";
+          User = cfg.user;
+          Group = cfg.group;
+          StateDirectory = "sabnzbd";
           ExecStart = "${lib.getBin cfg.package}/bin/sabnzbd -d -f ${cfg.configFile}";
         };
     };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 8080 ];
+    };
   };
 }
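As a usage note for the new openFirewall option, a sketch of a host configuration (illustrative only; per the firewall block above it opens TCP port 8080):

    {
      services.sabnzbd = {
        enable = true;
        openFirewall = true;   # allows TCP 8080 via networking.firewall (see allowedTCPPorts above)
      };
    }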
diff --git a/nixpkgs/nixos/modules/services/web-servers/zope2.nix b/nixpkgs/nixos/modules/services/web-servers/zope2.nix
deleted file mode 100644
index 29731b29eea4..000000000000
--- a/nixpkgs/nixos/modules/services/web-servers/zope2.nix
+++ /dev/null
@@ -1,262 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-
-  cfg = config.services.zope2;
-
-  zope2Opts = { name, ... }: {
-    options = {
-
-      name = mkOption {
-        default = "${name}";
-        type = types.str;
-        description = lib.mdDoc "The name of the zope2 instance. If undefined, the name of the attribute set will be used.";
-      };
-
-      threads = mkOption {
-        default = 2;
-        type = types.int;
-        description = lib.mdDoc "Specify the number of threads that Zope's ZServer web server will use to service requests. ";
-      };
-
-      http_address = mkOption {
-        default = "localhost:8080";
-        type = types.str;
-        description = lib.mdDoc "Give a port and address for the HTTP server.";
-      };
-
-      user = mkOption {
-        default = "zope2";
-        type = types.str;
-        description = lib.mdDoc "The name of the effective user for the Zope process.";
-      };
-
-      clientHome = mkOption {
-        default = "/var/lib/zope2/${name}";
-        type = types.path;
-        description = lib.mdDoc "Home directory of zope2 instance.";
-      };
-      extra = mkOption {
-        default =
-          ''
-          <zodb_db main>
-            mount-point /
-            cache-size 30000
-            <blobstorage>
-                blob-dir /var/lib/zope2/${name}/blobstorage
-                <filestorage>
-                path /var/lib/zope2/${name}/filestorage/Data.fs
-                </filestorage>
-            </blobstorage>
-          </zodb_db>
-          '';
-        type = types.lines;
-        description = lib.mdDoc "Extra zope.conf";
-      };
-
-      packages = mkOption {
-        type = types.listOf types.package;
-        description = lib.mdDoc "The list of packages you want to make available to the zope2 instance.";
-      };
-
-    };
-  };
-
-in
-
-{
-
-  ###### interface
-
-  options = {
-
-    services.zope2.instances = mkOption {
-      default = {};
-      type = with types; attrsOf (submodule zope2Opts);
-      example = literalExpression ''
-        {
-          plone01 = {
-            http_address = "127.0.0.1:8080";
-            extra =
-              '''
-              <zodb_db main>
-                mount-point /
-                cache-size 30000
-                <blobstorage>
-                    blob-dir /var/lib/zope2/plone01/blobstorage
-                    <filestorage>
-                    path /var/lib/zope2/plone01/filestorage/Data.fs
-                    </filestorage>
-                </blobstorage>
-              </zodb_db>
-              ''';
-          };
-        }
-      '';
-      description = lib.mdDoc "zope2 instances to be created automatically by the system.";
-    };
-  };
-
-  ###### implementation
-
-  config = mkIf (cfg.instances != {}) {
-
-    users.users.zope2 = {
-      isSystemUser = true;
-      group = "zope2";
-    };
-    users.groups.zope2 = {};
-
-    systemd.services =
-      let
-
-        createZope2Instance = opts: name:
-          let
-            interpreter = pkgs.writeScript "interpreter"
-              ''
-              import sys
-
-              _interactive = True
-              if len(sys.argv) > 1:
-                  _options, _args = __import__("getopt").getopt(sys.argv[1:], 'ic:m:')
-                  _interactive = False
-                  for (_opt, _val) in _options:
-                      if _opt == '-i':
-                          _interactive = True
-                      elif _opt == '-c':
-                          exec _val
-                      elif _opt == '-m':
-                          sys.argv[1:] = _args
-                          _args = []
-                          __import__("runpy").run_module(
-                              _val, {}, "__main__", alter_sys=True)
-
-                  if _args:
-                      sys.argv[:] = _args
-                      __file__ = _args[0]
-                      del _options, _args
-                      execfile(__file__)
-
-              if _interactive:
-                  del _interactive
-                  __import__("code").interact(banner="", local=globals())
-              '';
-            env = pkgs.buildEnv {
-              name = "zope2-${name}-env";
-              paths = [
-                pkgs.python27
-                pkgs.python27Packages.recursive-pth-loader
-                pkgs.python27Packages."plone.recipe.zope2instance"
-              ] ++ attrValues pkgs.python27.modules
-                ++ opts.packages;
-              postBuild =
-                ''
-                echo "#!$out/bin/python" > $out/bin/interpreter
-                cat ${interpreter} >> $out/bin/interpreter
-                '';
-            };
-            conf = pkgs.writeText "zope2-${name}-conf"
-              ''
-              %define INSTANCEHOME ${env}
-              instancehome $INSTANCEHOME
-              %define CLIENTHOME ${opts.clientHome}/${opts.name}
-              clienthome $CLIENTHOME
-
-              debug-mode off
-              security-policy-implementation C
-              verbose-security off
-              default-zpublisher-encoding utf-8
-              zserver-threads ${toString opts.threads}
-              effective-user ${opts.user}
-
-              pid-filename ${opts.clientHome}/${opts.name}/pid
-              lock-filename ${opts.clientHome}/${opts.name}/lock
-              python-check-interval 1000
-              enable-product-installation off
-
-              <environment>
-                zope_i18n_compile_mo_files false
-              </environment>
-
-              <eventlog>
-              level INFO
-              <logfile>
-                  path /var/log/zope2/${name}.log
-                  level INFO
-              </logfile>
-              </eventlog>
-
-              <logger access>
-              level WARN
-              <logfile>
-                  path /var/log/zope2/${name}-Z2.log
-                  format %(message)s
-              </logfile>
-              </logger>
-
-              <http-server>
-              address ${opts.http_address}
-              </http-server>
-
-              <zodb_db temporary>
-              <temporarystorage>
-                  name temporary storage for sessioning
-              </temporarystorage>
-              mount-point /temp_folder
-              container-class Products.TemporaryFolder.TemporaryContainer
-              </zodb_db>
-
-              ${opts.extra}
-              '';
-            ctlScript = pkgs.writeScript "zope2-${name}-ctl-script"
-              ''
-              #!${env}/bin/python
-
-              import sys
-              import plone.recipe.zope2instance.ctl
-
-              if __name__ == '__main__':
-                  sys.exit(plone.recipe.zope2instance.ctl.main(
-                      ["-C", "${conf}"]
-                      + sys.argv[1:]))
-              '';
-
-            ctl = pkgs.writeScript "zope2-${name}-ctl"
-              ''
-              #!${pkgs.bash}/bin/bash -e
-              export PYTHONHOME=${env}
-              exec ${ctlScript} "$@"
-              '';
-          in {
-            #description = "${name} instance";
-            after = [ "network.target" ];  # with RelStorage also add "postgresql.service"
-            wantedBy = [ "multi-user.target" ];
-            path = opts.packages;
-            preStart =
-              ''
-              mkdir -p /var/log/zope2/
-              touch /var/log/zope2/${name}.log
-              touch /var/log/zope2/${name}-Z2.log
-              chown ${opts.user} /var/log/zope2/${name}.log
-              chown ${opts.user} /var/log/zope2/${name}-Z2.log
-
-              mkdir -p ${opts.clientHome}/filestorage ${opts.clientHome}/blobstorage
-              mkdir -p ${opts.clientHome}/${opts.name}
-              chown ${opts.user} ${opts.clientHome} -R
-
-              ${ctl} adduser admin admin
-              '';
-
-            serviceConfig.Type = "forking";
-            serviceConfig.ExecStart = "${ctl} start";
-            serviceConfig.ExecStop = "${ctl} stop";
-            serviceConfig.ExecReload = "${ctl} restart";
-          };
-
-      in listToAttrs (map (name: { name = "zope2-${name}"; value = createZope2Instance (builtins.getAttr name cfg.instances) name; }) (builtins.attrNames cfg.instances));
-
-  };
-
-}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/deepin.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/deepin.nix
index 7d3acada6073..0824d6e30a8a 100644
--- a/nixpkgs/nixos/modules/services/x11/desktop-managers/deepin.nix
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/deepin.nix
@@ -173,19 +173,20 @@ in
           ];
           optionalPackages = [
             onboard # dde-dock plugin
-            deepin-camera
             deepin-calculator
             deepin-compressor
             deepin-editor
             deepin-picker
             deepin-draw
-            deepin-album
-            deepin-image-viewer
             deepin-music
             deepin-movie-reborn
             deepin-system-monitor
-            deepin-screen-recorder
             deepin-shortcut-viewer
+            # freeimage has knownVulnerabilities, so don't install packages using freeimage by default
+            # deepin-album
+            # deepin-camera
+            # deepin-image-viewer
+            # deepin-screen-recorder
           ];
         in
         requiredPackages
diff --git a/nixpkgs/nixos/modules/system/boot/stage-1.nix b/nixpkgs/nixos/modules/system/boot/stage-1.nix
index 8f3f3612805f..90a74c0ac578 100644
--- a/nixpkgs/nixos/modules/system/boot/stage-1.nix
+++ b/nixpkgs/nixos/modules/system/boot/stage-1.nix
@@ -3,7 +3,7 @@
 # the modules necessary to mount the root file system, then calls the
 # init in the root file system to start the second boot stage.
 
-{ config, lib, utils, pkgs, ... }:
+{ config, options, lib, utils, pkgs, ... }:
 
 with lib;
 
@@ -636,10 +636,8 @@ in
       };
 
     boot.initrd.supportedFilesystems = mkOption {
-      default = [ ];
-      example = [ "btrfs" ];
-      type = types.listOf types.str;
-      description = lib.mdDoc "Names of supported filesystem types in the initial ramdisk.";
+      default = { };
+      inherit (options.boot.supportedFilesystems) example type description;
     };
 
     boot.initrd.verbose = mkOption {
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix b/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix
index 9641921fc795..f83837fbc6d4 100644
--- a/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix
+++ b/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix
@@ -90,8 +90,6 @@ let
     inherit (cfg) packages package;
   };
 
-  fileSystems = filter utils.fsNeededForBoot config.system.build.fileSystems;
-
   kernel-name = config.boot.kernelPackages.kernel.name or "kernel";
   modulesTree = config.system.modulesTree.override { name = kernel-name + "-modules"; };
   firmware = config.hardware.firmware;
diff --git a/nixpkgs/nixos/modules/system/boot/uki.nix b/nixpkgs/nixos/modules/system/boot/uki.nix
index 63a7cbc5967b..ce00ac8e6397 100644
--- a/nixpkgs/nixos/modules/system/boot/uki.nix
+++ b/nixpkgs/nixos/modules/system/boot/uki.nix
@@ -27,6 +27,20 @@ in
         description = lib.mdDoc "Version of the image or generation the UKI belongs to";
       };
 
+      tries = lib.mkOption {
+        type = lib.types.nullOr lib.types.ints.unsigned;
+        default = null;
+        description = lib.mdDoc ''
+          Number of boot attempts before this UKI is considered bad.
+
+          If no tries are specified (the default), automatic boot assessment remains inactive.
+
+          See documentation on [Automatic Boot Assessment](https://systemd.io/AUTOMATIC_BOOT_ASSESSMENT/) and
+          [boot counting](https://uapi-group.org/specifications/specs/boot_loader_specification/#boot-counting)
+          for more information.
+        '';
+      };
+
       settings = lib.mkOption {
         type = format.type;
         description = lib.mdDoc ''
@@ -69,8 +83,9 @@ in
         name = config.boot.uki.name;
         version = config.boot.uki.version;
         versionInfix = if version != null then "_${version}" else "";
+        triesInfix = if cfg.tries != null then "+${builtins.toString cfg.tries}" else "";
       in
-      name + versionInfix + ".efi";
+      name + versionInfix + triesInfix + ".efi";
 
     system.build.uki = pkgs.runCommand config.system.boot.loader.ukiFile { } ''
       mkdir -p $out
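A brief sketch of how the new tries option shows up in the UKI filename (the name and version values are illustrative; the suffix comes from the triesInfix expression above):

    {
      boot.uki = {
        name = "nixos";      # illustrative
        version = "42";      # illustrative
        tries = 3;           # yields "nixos_42+3.efi"
      };
      # The "+3" suffix is what the boot-counting machinery described in the
      # linked systemd/UAPI documents uses to track remaining boot attempts.
    }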
diff --git a/nixpkgs/nixos/modules/system/etc/etc.nix b/nixpkgs/nixos/modules/system/etc/etc.nix
index baf37ba6def3..9f735364196c 100644
--- a/nixpkgs/nixos/modules/system/etc/etc.nix
+++ b/nixpkgs/nixos/modules/system/etc/etc.nix
@@ -238,7 +238,9 @@ in
       # this should not run because /etc is mounted via a systemd mount unit
       # instead. To a large extent this mimics what composefs does. Because
       # it's relatively simple, however, we avoid the composefs dependency.
-      if [[ ! $IN_NIXOS_SYSTEMD_STAGE1 ]]; then
+      # Since this script is not idempotent, it should not run when etc hasn't
+      # changed.
+      if [[ ! $IN_NIXOS_SYSTEMD_STAGE1 ]] && [[ "${config.system.build.etc}/etc" != "$(readlink -f /run/current-system/etc)" ]]; then
         echo "remounting /etc..."
 
         tmpMetadataMount=$(mktemp --directory)
diff --git a/nixpkgs/nixos/modules/tasks/filesystems.nix b/nixpkgs/nixos/modules/tasks/filesystems.nix
index 1378a0090c1d..e72a1e37759e 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems.nix
@@ -246,10 +246,23 @@ in
     };
 
     boot.supportedFilesystems = mkOption {
-      default = [ ];
-      example = [ "btrfs" ];
-      type = types.listOf types.str;
-      description = lib.mdDoc "Names of supported filesystem types.";
+      default = { };
+      example = lib.literalExpression ''
+        {
+          btrfs = true;
+          zfs = lib.mkForce false;
+        }
+      '';
+      type = types.coercedTo
+        (types.listOf types.str)
+        (enabled: lib.listToAttrs (map (fs: lib.nameValuePair fs true) enabled))
+        (types.attrsOf types.bool);
+      description = lib.mdDoc ''
+        Names of supported filesystem types, or an attribute set of file system types
+        and their state. The set form may be used together with `lib.mkForce` to
+        explicitly disable support for specific filesystems, e.g. to disable ZFS
+        with an unsupported kernel.
+      '';
     };
 
     boot.specialFileSystems = mkOption {
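Both spellings of the changed option remain valid because of the coercedTo type; a short sketch (values illustrative):

    { lib, ... }:
    {
      # list form, coerced to { btrfs = true; } internally
      boot.supportedFilesystems = [ "btrfs" ];

      # attribute-set form, e.g. to force ZFS support off even if another
      # module turns it on (as in the option example above):
      # boot.supportedFilesystems.zfs = lib.mkForce false;
    }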
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/apfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/apfs.nix
index 2f2be351df61..980a3ad0f9c4 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/apfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/apfs.nix
@@ -4,12 +4,12 @@ with lib;
 
 let
 
-  inInitrd = any (fs: fs == "apfs") config.boot.initrd.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.apfs or false;
 
 in
 
 {
-  config = mkIf (any (fs: fs == "apfs") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.apfs or false) {
 
     system.fsPackages = [ pkgs.apfsprogs ];
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/bcachefs.nix b/nixpkgs/nixos/modules/tasks/filesystems/bcachefs.nix
index 3b990ce30b21..ba33edd702f7 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/bcachefs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/bcachefs.nix
@@ -118,7 +118,7 @@ let
 in
 
 {
-  config = lib.mkIf (lib.elem "bcachefs" config.boot.supportedFilesystems) (lib.mkMerge [
+  config = lib.mkIf (config.boot.supportedFilesystems.bcachefs or false) (lib.mkMerge [
     {
       inherit assertions;
       # needed for systemd-remount-fs
@@ -133,7 +133,7 @@ in
       };
     }
 
-    (lib.mkIf ((lib.elem "bcachefs" config.boot.initrd.supportedFilesystems) || (bootFs != {})) {
+    (lib.mkIf ((config.boot.initrd.supportedFilesystems.bcachefs or false) || (bootFs != {})) {
       inherit assertions;
       # chacha20 and poly1305 are required only for decryption attempts
       boot.initrd.availableKernelModules = [ "bcachefs" "sha256" "chacha20" "poly1305" ];
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/btrfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/btrfs.nix
index 87fe326c0974..8494a06f97a2 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/btrfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/btrfs.nix
@@ -4,8 +4,8 @@ with lib;
 
 let
 
-  inInitrd = any (fs: fs == "btrfs") config.boot.initrd.supportedFilesystems;
-  inSystem = any (fs: fs == "btrfs") config.boot.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.btrfs or false;
+  inSystem = config.boot.supportedFilesystems.btrfs or false;
 
   cfgScrub = config.services.btrfs.autoScrub;
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/cifs.nix b/nixpkgs/nixos/modules/tasks/filesystems/cifs.nix
index 837b9e19bfb9..5a562b2940f7 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/cifs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/cifs.nix
@@ -4,14 +4,14 @@ with lib;
 
 let
 
-  inInitrd = any (fs: fs == "cifs") config.boot.initrd.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.cifs or false;
 
 in
 
 {
   config = {
 
-    system.fsPackages = mkIf (any (fs: fs == "cifs") config.boot.supportedFilesystems) [ pkgs.cifs-utils ];
+    system.fsPackages = mkIf (config.boot.supportedFilesystems.cifs or false) [ pkgs.cifs-utils ];
 
     boot.initrd.availableKernelModules = mkIf inInitrd
       [ "cifs" "nls_utf8" "hmac" "md4" "ecb" "des_generic" "sha256" ];
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/ecryptfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/ecryptfs.nix
index 8138e6591610..f966a1be1536 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/ecryptfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/ecryptfs.nix
@@ -4,7 +4,7 @@
 with lib;
 
 {
-  config = mkIf (any (fs: fs == "ecryptfs") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.ecryptfs or false) {
     system.fsPackages = [ pkgs.ecryptfs ];
     security.wrappers = {
       "mount.ecryptfs_private" =
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/erofs.nix b/nixpkgs/nixos/modules/tasks/filesystems/erofs.nix
index a3d657669350..b13fa2531557 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/erofs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/erofs.nix
@@ -2,8 +2,8 @@
 
 let
 
-  inInitrd = lib.any (fs: fs == "erofs") config.boot.initrd.supportedFilesystems;
-  inSystem = lib.any (fs: fs == "erofs") config.boot.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.erofs or false;
+  inSystem = config.boot.supportedFilesystems.erofs or false;
 
 in
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/exfat.nix b/nixpkgs/nixos/modules/tasks/filesystems/exfat.nix
index 540b9b91c3ec..4011653c00df 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/exfat.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/exfat.nix
@@ -3,7 +3,7 @@
 with lib;
 
 {
-  config = mkIf (any (fs: fs == "exfat") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.exfat or false) {
     system.fsPackages = if config.boot.kernelPackages.kernelOlder "5.7" then [
       pkgs.exfat # FUSE
     ] else [
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/ext.nix b/nixpkgs/nixos/modules/tasks/filesystems/ext.nix
index 1c34ee2c7035..165fe9474c3e 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/ext.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/ext.nix
@@ -2,8 +2,10 @@
 
 let
 
-  inInitrd = lib.any (fs: fs == "ext2" || fs == "ext3" || fs == "ext4") config.boot.initrd.supportedFilesystems;
-  inSystem = lib.any (fs: fs == "ext2" || fs == "ext3" || fs == "ext4") config.boot.supportedFilesystems;
+  hasExtX = s: s.ext2 or s.ext3 or s.ext4 or false;
+
+  inInitrd = hasExtX config.boot.initrd.supportedFilesystems;
+  inSystem = hasExtX config.boot.supportedFilesystems;
 
 in
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/f2fs.nix b/nixpkgs/nixos/modules/tasks/filesystems/f2fs.nix
index 4f99f9a57fa6..f4f5fcab9cae 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/f2fs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/f2fs.nix
@@ -3,11 +3,10 @@
 with lib;
 
 let
-  inInitrd = any (fs: fs == "f2fs") config.boot.initrd.supportedFilesystems;
-  fileSystems = filter (x: x.fsType == "f2fs") config.system.build.fileSystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.f2fs or false;
 in
 {
-  config = mkIf (any (fs: fs == "f2fs") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.f2fs or false) {
 
     system.fsPackages = [ pkgs.f2fs-tools ];
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/glusterfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/glusterfs.nix
index e8c7fa8efbae..02ef95262dbd 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/glusterfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/glusterfs.nix
@@ -3,7 +3,7 @@
 with lib;
 
 {
-  config = mkIf (any (fs: fs == "glusterfs") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.glusterfs or false) {
 
     system.fsPackages = [ pkgs.glusterfs ];
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/jfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/jfs.nix
index b5132b4caa33..73ddb0fb18bb 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/jfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/jfs.nix
@@ -3,10 +3,10 @@
 with lib;
 
 let
-  inInitrd = any (fs: fs == "jfs") config.boot.initrd.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.jfs or false;
 in
 {
-  config = mkIf (any (fs: fs == "jfs") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.jfs or false) {
 
     system.fsPackages = [ pkgs.jfsutils ];
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/nfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/nfs.nix
index 8c631f0772db..462568b5db3e 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/nfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/nfs.nix
@@ -4,7 +4,7 @@ with lib;
 
 let
 
-  inInitrd = any (fs: fs == "nfs") config.boot.initrd.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.nfs or false;
 
   nfsStateDir = "/var/lib/nfs";
 
@@ -58,7 +58,7 @@ in
 
   ###### implementation
 
-  config = mkIf (any (fs: fs == "nfs" || fs == "nfs4") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.nfs or config.boot.supportedFilesystems.nfs4 or false) {
 
     services.rpcbind.enable = true;
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/ntfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/ntfs.nix
index c40d2a1a80bc..99ba494a7a39 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/ntfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/ntfs.nix
@@ -3,7 +3,7 @@
 with lib;
 
 {
-  config = mkIf (any (fs: fs == "ntfs" || fs == "ntfs-3g") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.ntfs or config.boot.supportedFilesystems.ntfs-3g or false) {
 
     system.fsPackages = [ pkgs.ntfs3g ];
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/reiserfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/reiserfs.nix
index 3c6a0f0cd917..f3f5e6aaa10b 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/reiserfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/reiserfs.nix
@@ -4,12 +4,12 @@ with lib;
 
 let
 
-  inInitrd = any (fs: fs == "reiserfs") config.boot.initrd.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.reiserfs or false;
 
 in
 
 {
-  config = mkIf (any (fs: fs == "reiserfs") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.reiserfs or false) {
 
     system.fsPackages = [ pkgs.reiserfsprogs ];
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/squashfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/squashfs.nix
index 10d45a21d3ca..a0fac904766a 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/squashfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/squashfs.nix
@@ -2,7 +2,7 @@
 
 let
 
-  inInitrd = lib.any (fs: fs == "squashfs") config.boot.initrd.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.squashfs or false;
 
 in
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/sshfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/sshfs.nix
index cd71dda16d8b..63ff7f2b6b39 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/sshfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/sshfs.nix
@@ -1,7 +1,11 @@
 { config, lib, pkgs, ... }:
 
 {
-  config = lib.mkIf (lib.any (fs: fs == "sshfs" || fs == "fuse.sshfs") config.boot.supportedFilesystems) {
-    system.fsPackages = [ pkgs.sshfs ];
-  };
+  config = lib.mkIf
+    (config.boot.supportedFilesystems.sshfs
+      or config.boot.supportedFilesystems."fuse.sshfs"
+      or false)
+    {
+      system.fsPackages = [ pkgs.sshfs ];
+    };
 }
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/unionfs-fuse.nix b/nixpkgs/nixos/modules/tasks/filesystems/unionfs-fuse.nix
index f9954b5182f9..929454ff1529 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/unionfs-fuse.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/unionfs-fuse.nix
@@ -3,7 +3,7 @@
 {
   config = lib.mkMerge [
 
-    (lib.mkIf (lib.any (fs: fs == "unionfs-fuse") config.boot.initrd.supportedFilesystems) {
+    (lib.mkIf (config.boot.initrd.supportedFilesystems.unionfs-fuse or false) {
       boot.initrd.kernelModules = [ "fuse" ];
 
       boot.initrd.extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
@@ -35,7 +35,7 @@
       };
     })
 
-    (lib.mkIf (lib.any (fs: fs == "unionfs-fuse") config.boot.supportedFilesystems) {
+    (lib.mkIf (config.boot.supportedFilesystems.unionfs-fuse or false) {
       system.fsPackages = [ pkgs.unionfs-fuse ];
     })
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/vboxsf.nix b/nixpkgs/nixos/modules/tasks/filesystems/vboxsf.nix
index 5497194f6a8d..00245b5af252 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/vboxsf.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/vboxsf.nix
@@ -4,7 +4,7 @@ with lib;
 
 let
 
-  inInitrd = any (fs: fs == "vboxsf") config.boot.initrd.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.vboxsf or false;
 
   package = pkgs.runCommand "mount.vboxsf" { preferLocalBuild = true; } ''
     mkdir -p $out/bin
@@ -13,7 +13,7 @@ let
 in
 
 {
-  config = mkIf (any (fs: fs == "vboxsf") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.vboxsf or false) {
 
     system.fsPackages = [ package ];
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/vfat.nix b/nixpkgs/nixos/modules/tasks/filesystems/vfat.nix
index 9281b34633c2..d7acc0c9e50b 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/vfat.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/vfat.nix
@@ -4,12 +4,12 @@ with lib;
 
 let
 
-  inInitrd = any (fs: fs == "vfat") config.boot.initrd.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.vfat or false;
 
 in
 
 {
-  config = mkIf (any (fs: fs == "vfat") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.vfat or false) {
 
     system.fsPackages = [ pkgs.dosfstools pkgs.mtools ];
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/xfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/xfs.nix
index 76f31e660ad3..50dc1b3340aa 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/xfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/xfs.nix
@@ -4,12 +4,12 @@ with lib;
 
 let
 
-  inInitrd = any (fs: fs == "xfs") config.boot.initrd.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.xfs or false;
 
 in
 
 {
-  config = mkIf (any (fs: fs == "xfs") config.boot.supportedFilesystems) {
+  config = mkIf (config.boot.supportedFilesystems.xfs or false) {
 
     system.fsPackages = [ pkgs.xfsprogs.bin ];
 
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/zfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/zfs.nix
index b289d2151eb7..98df6a40e8a1 100644
--- a/nixpkgs/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixpkgs/nixos/modules/tasks/filesystems/zfs.nix
@@ -20,8 +20,8 @@ let
   clevisDatasets = map (e: e.device) (filter (e: e.device != null && (hasAttr e.device config.boot.initrd.clevis.devices) && e.fsType == "zfs" && (fsNeededForBoot e)) config.system.build.fileSystems);
 
 
-  inInitrd = any (fs: fs == "zfs") config.boot.initrd.supportedFilesystems;
-  inSystem = any (fs: fs == "zfs") config.boot.supportedFilesystems;
+  inInitrd = config.boot.initrd.supportedFilesystems.zfs or false;
+  inSystem = config.boot.supportedFilesystems.zfs or false;
 
   autosnapPkg = pkgs.zfstools.override {
     zfs = cfgZfs.package;
diff --git a/nixpkgs/nixos/modules/virtualisation/containers.nix b/nixpkgs/nixos/modules/virtualisation/containers.nix
index 3e33cabf2660..b3d81078eb34 100644
--- a/nixpkgs/nixos/modules/virtualisation/containers.nix
+++ b/nixpkgs/nixos/modules/virtualisation/containers.nix
@@ -28,6 +28,43 @@ in
       description = lib.mdDoc "Enable the OCI seccomp BPF hook";
     };
 
+    cdi = {
+      dynamic.nvidia.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable dynamic CDI configuration for NVidia devices by running nvidia-container-toolkit on boot.
+        '';
+      };
+
+      static = mkOption {
+        type = types.attrs;
+        default = { };
+        description = lib.mdDoc ''
+          Declarative CDI specification. Each key of the attribute set
+          will be mapped to a file in /etc/cdi, and its value will be
+          serialised to JSON.
+        '';
+        example = {
+          some-vendor = builtins.fromJSON ''
+              {
+                "cdiVersion": "0.5.0",
+                "kind": "some-vendor.com/foo",
+                "devices": [],
+                "containerEdits": []
+              }
+            '';
+
+          some-other-vendor = {
+            cdiVersion = "0.5.0";
+            kind = "some-other-vendor.com/bar";
+            devices = [];
+            containerEdits = [];
+          };
+        };
+      };
+    };
+
     containersConf.settings = mkOption {
       type = toml.type;
       default = { };
@@ -113,6 +150,8 @@ in
 
   config = lib.mkIf cfg.enable {
 
+    hardware.nvidia-container-toolkit-cdi-generator.enable = lib.mkIf cfg.cdi.dynamic.nvidia.enable true;
+
     virtualisation.containers.containersConf.cniPlugins = [ pkgs.cni-plugins ];
 
     virtualisation.containers.containersConf.settings = {
@@ -124,19 +163,28 @@ in
       };
     };
 
-    environment.etc."containers/containers.conf".source =
-      toml.generate "containers.conf" cfg.containersConf.settings;
-
-    environment.etc."containers/storage.conf".source =
-      toml.generate "storage.conf" cfg.storage.settings;
+    environment.etc = let
+      cdiStaticConfigurationFiles = (lib.attrsets.mapAttrs'
+        (name: value:
+          lib.attrsets.nameValuePair "cdi/${name}.json"
+            { text = builtins.toJSON value; })
+        cfg.cdi.static);
+    in {
+      "containers/containers.conf".source =
+        toml.generate "containers.conf" cfg.containersConf.settings;
+
+      "containers/storage.conf".source =
+        toml.generate "storage.conf" cfg.storage.settings;
+
+      "containers/registries.conf".source = toml.generate "registries.conf" {
+        registries = lib.mapAttrs (n: v: { registries = v; }) cfg.registries;
+      };
 
-    environment.etc."containers/registries.conf".source = toml.generate "registries.conf" {
-      registries = lib.mapAttrs (n: v: { registries = v; }) cfg.registries;
-    };
+      "containers/policy.json".source =
+        if cfg.policy != { } then pkgs.writeText "policy.json" (builtins.toJSON cfg.policy)
+        else "${pkgs.skopeo.policy}/default-policy.json";
+    } // cdiStaticConfigurationFiles;
 
-    environment.etc."containers/policy.json".source =
-      if cfg.policy != { } then pkgs.writeText "policy.json" (builtins.toJSON cfg.policy)
-      else "${pkgs.skopeo.policy}/default-policy.json";
   };
 
 }
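Pulling the two new CDI knobs together, a configuration sketch (the static entry reuses the example from the option description above; real vendor specs will differ):

    {
      virtualisation.containers = {
        enable = true;
        # generate an NVIDIA CDI spec at boot via nvidia-container-toolkit:
        cdi.dynamic.nvidia.enable = true;
        # and/or ship a static spec, rendered to /etc/cdi/some-vendor.json:
        cdi.static.some-vendor = {
          cdiVersion = "0.5.0";
          kind = "some-vendor.com/foo";
          devices = [ ];
          containerEdits = [ ];
        };
      };
    }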
diff --git a/nixpkgs/nixos/modules/virtualisation/cri-o.nix b/nixpkgs/nixos/modules/virtualisation/cri-o.nix
index dacd700537c7..417cf516c7f4 100644
--- a/nixpkgs/nixos/modules/virtualisation/cri-o.nix
+++ b/nixpkgs/nixos/modules/virtualisation/cri-o.nix
@@ -6,7 +6,7 @@ let
 
   crioPackage = pkgs.cri-o.override {
     extraPackages = cfg.extraPackages
-      ++ lib.optional (builtins.elem "zfs" config.boot.supportedFilesystems) config.boot.zfs.package;
+      ++ lib.optional (config.boot.supportedFilesystems.zfs or false) config.boot.zfs.package;
   };
 
   format = pkgs.formats.toml { };
diff --git a/nixpkgs/nixos/modules/virtualisation/docker.nix b/nixpkgs/nixos/modules/virtualisation/docker.nix
index d4d34d13a94d..cceb186e0b36 100644
--- a/nixpkgs/nixos/modules/virtualisation/docker.nix
+++ b/nixpkgs/nixos/modules/virtualisation/docker.nix
@@ -72,6 +72,8 @@ in
         type = types.bool;
         default = false;
         description = lib.mdDoc ''
+          **Deprecated**, please use virtualisation.containers.cdi.dynamic.nvidia.enable instead.
+
           Enable nvidia-docker wrapper, supporting NVIDIA GPUs inside docker containers.
         '';
       };
@@ -185,6 +187,16 @@ in
       users.groups.docker.gid = config.ids.gids.docker;
       systemd.packages = [ cfg.package ];
 
+      # Docker 25.0.0 supports CDI by default
+      # (https://docs.docker.com/engine/release-notes/25.0/#new). Encourage
+      # moving to CDI as opposed to having deprecated runtime
+      # wrappers.
+      warnings = lib.optionals (cfg.enableNvidia && (lib.strings.versionAtLeast cfg.package.version "25")) [
+        ''
+          You have set virtualisation.docker.enableNvidia. This option is deprecated, please set virtualisation.containers.cdi.dynamic.nvidia.enable instead.
+        ''
+      ];
+
       systemd.services.docker = {
         wantedBy = optional cfg.enableOnBoot "multi-user.target";
         after = [ "network.target" "docker.socket" ];
diff --git a/nixpkgs/nixos/modules/virtualisation/hyperv-image.nix b/nixpkgs/nixos/modules/virtualisation/hyperv-image.nix
index efaea0c110d2..fddff7bf1c69 100644
--- a/nixpkgs/nixos/modules/virtualisation/hyperv-image.nix
+++ b/nixpkgs/nixos/modules/virtualisation/hyperv-image.nix
@@ -60,7 +60,6 @@ in {
     boot.growPartition = true;
 
     boot.loader.grub = {
-      version = 2;
       device = "nodev";
       efiSupport = true;
       efiInstallAsRemovable = true;
diff --git a/nixpkgs/nixos/modules/virtualisation/linode-config.nix b/nixpkgs/nixos/modules/virtualisation/linode-config.nix
index bbf81bda9c02..209bff57ea8b 100644
--- a/nixpkgs/nixos/modules/virtualisation/linode-config.nix
+++ b/nixpkgs/nixos/modules/virtualisation/linode-config.nix
@@ -59,7 +59,6 @@ with lib;
 
       grub = {
         enable = true;
-        version = 2;
         forceInstall = true;
         device = "nodev";
 
diff --git a/nixpkgs/nixos/modules/virtualisation/lxc-container.nix b/nixpkgs/nixos/modules/virtualisation/lxc-container.nix
index 8d3a480e6dc8..95e3083ff9ed 100644
--- a/nixpkgs/nixos/modules/virtualisation/lxc-container.nix
+++ b/nixpkgs/nixos/modules/virtualisation/lxc-container.nix
@@ -14,7 +14,9 @@
 
   options = { };
 
-  config = {
+  config = let
+    initScript = if config.boot.initrd.systemd.enable then "prepare-root" else "init";
+  in {
     boot.isContainer = true;
     boot.postBootCommands =
       ''
@@ -41,7 +43,7 @@
 
       contents = [
         {
-          source = config.system.build.toplevel + "/init";
+          source = config.system.build.toplevel + "/${initScript}";
           target = "/sbin/init";
         }
         # Technically this is not required for lxc, but having it also makes this configuration work with systemd-nspawn.
@@ -65,7 +67,7 @@
 
       pseudoFiles = [
         "/sbin d 0755 0 0"
-        "/sbin/init s 0555 0 0 ${config.system.build.toplevel}/init"
+        "/sbin/init s 0555 0 0 ${config.system.build.toplevel}/${initScript}"
         "/dev d 0755 0 0"
         "/proc d 0555 0 0"
         "/sys d 0555 0 0"
@@ -74,7 +76,7 @@
 
     system.build.installBootLoader = pkgs.writeScript "install-lxd-sbin-init.sh" ''
       #!${pkgs.runtimeShell}
-      ${pkgs.coreutils}/bin/ln -fs "$1/init" /sbin/init
+      ${pkgs.coreutils}/bin/ln -fs "$1/${initScript}" /sbin/init
     '';
 
     # networkd depends on this, but systemd module disables this for containers
@@ -83,7 +85,7 @@
     systemd.packages = [ pkgs.distrobuilder.generator ];
 
     system.activationScripts.installInitScript = lib.mkForce ''
-      ln -fs $systemConfig/init /sbin/init
+      ln -fs $systemConfig/${initScript} /sbin/init
     '';
   };
 }
diff --git a/nixpkgs/nixos/modules/virtualisation/oci-containers.nix b/nixpkgs/nixos/modules/virtualisation/oci-containers.nix
index b6a7b1154c4a..a88715587d65 100644
--- a/nixpkgs/nixos/modules/virtualisation/oci-containers.nix
+++ b/nixpkgs/nixos/modules/virtualisation/oci-containers.nix
@@ -252,10 +252,13 @@ let
       text = ''
         ${cfg.backend} rm -f ${name} || true
         ${optionalString (isValidLogin container.login) ''
+          # try logging in; if it fails, check whether the image exists locally
           ${cfg.backend} login \
           ${container.login.registry} \
           --username ${container.login.username} \
-          --password-stdin < ${container.login.passwordFile}
+          --password-stdin < ${container.login.passwordFile} \
+          || ${cfg.backend} image inspect ${container.image} >/dev/null \
+          || { echo "image doesn't exist locally and login failed" >&2 ; exit 1; }
         ''}
         ${optionalString (container.imageFile != null) ''
           ${cfg.backend} load -i ${container.imageFile}
diff --git a/nixpkgs/nixos/modules/virtualisation/podman/default.nix b/nixpkgs/nixos/modules/virtualisation/podman/default.nix
index 47382f9beab0..5a99dc8a1bb9 100644
--- a/nixpkgs/nixos/modules/virtualisation/podman/default.nix
+++ b/nixpkgs/nixos/modules/virtualisation/podman/default.nix
@@ -9,7 +9,7 @@ let
     extraPackages = cfg.extraPackages
       # setuid shadow
       ++ [ "/run/wrappers" ]
-      ++ lib.optional (builtins.elem "zfs" config.boot.supportedFilesystems) config.boot.zfs.package;
+      ++ lib.optional (config.boot.supportedFilesystems.zfs or false) config.boot.zfs.package;
   });
 
   # Provides a fake "docker" binary mapping to podman
@@ -82,6 +82,8 @@ in
       type = types.bool;
       default = false;
       description = lib.mdDoc ''
+        **Deprecated**, please use virtualisation.containers.cdi.dynamic.nvidia.enable instead.
+
         Enable use of NVidia GPUs from within podman containers.
       '';
     };
@@ -166,6 +168,12 @@ in
       inherit (networkConfig) dns_enabled network_interface;
     in
     lib.mkIf cfg.enable {
+      warnings = lib.optionals cfg.enableNvidia [
+        ''
+          You have set virtualisation.podman.enableNvidia. This option is deprecated, please set virtualisation.containers.cdi.dynamic.nvidia.enable instead.
+        ''
+      ];
+
       environment.systemPackages = [ cfg.package ]
         ++ lib.optional cfg.dockerCompat dockerCompat;
 
diff --git a/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix b/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix
index 55a214325118..75ba6dacc122 100644
--- a/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix
@@ -877,9 +877,11 @@ in
         type = types.package;
         default = (pkgs.OVMF.override {
           secureBoot = cfg.useSecureBoot;
+          systemManagementModeRequired = cfg.useSecureBoot;
         }).fd;
         defaultText = ''(pkgs.OVMF.override {
           secureBoot = cfg.useSecureBoot;
+          systemManagementModeRequired = cfg.useSecureBoot;
         }).fd'';
         description =
         lib.mdDoc "OVMF firmware package, defaults to OVMF configured with secure boot if needed.";
@@ -1183,6 +1185,10 @@ in
         "-tpmdev emulator,id=tpm_dev_0,chardev=chrtpm"
         "-device ${cfg.tpm.deviceModel},tpmdev=tpm_dev_0"
       ])
+      (mkIf (cfg.efi.OVMF.systemManagementModeRequired or false) [
+        "-machine" "q35,smm=on"
+        "-global" "driver=cfi.pflash01,property=secure,value=on"
+      ])
     ];
 
     virtualisation.qemu.drives = mkMerge [
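A sketch of the VM-test side of the OVMF change (the option path virtualisation.useSecureBoot is inferred from the cfg.useSecureBoot references above and should be treated as an assumption):

    {
      virtualisation.useSecureBoot = true;
      # With the default OVMF package above, the firmware is built with
      # systemManagementModeRequired, and the new mkIf block adds
      # "-machine q35,smm=on" plus the secure pflash property to QEMU.
    }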
diff --git a/nixpkgs/nixos/modules/virtualisation/vmware-image.nix b/nixpkgs/nixos/modules/virtualisation/vmware-image.nix
index a38713b4d4ee..3674b37d0b97 100644
--- a/nixpkgs/nixos/modules/virtualisation/vmware-image.nix
+++ b/nixpkgs/nixos/modules/virtualisation/vmware-image.nix
@@ -80,7 +80,6 @@ in {
     boot.growPartition = true;
 
     boot.loader.grub = {
-      version = 2;
       device = "nodev";
       efiSupport = true;
       efiInstallAsRemovable = true;
diff --git a/nixpkgs/nixos/tests/activation/etc-overlay-immutable.nix b/nixpkgs/nixos/tests/activation/etc-overlay-immutable.nix
index 70c3623b929c..f347f9cf8efe 100644
--- a/nixpkgs/nixos/tests/activation/etc-overlay-immutable.nix
+++ b/nixpkgs/nixos/tests/activation/etc-overlay-immutable.nix
@@ -20,11 +20,17 @@
   };
 
   testScript = ''
-    machine.succeed("findmnt --kernel --type overlay /etc")
-    machine.fail("stat /etc/newgen")
+    with subtest("/etc is mounted as an overlay"):
+      machine.succeed("findmnt --kernel --type overlay /etc")
 
-    machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+    with subtest("switching to the same generation"):
+      machine.succeed("/run/current-system/bin/switch-to-configuration test")
 
-    assert machine.succeed("cat /etc/newgen") == "newgen"
+    with subtest("switching to a new generation"):
+      machine.fail("stat /etc/newgen")
+
+      machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+
+      assert machine.succeed("cat /etc/newgen") == "newgen"
   '';
 }
diff --git a/nixpkgs/nixos/tests/activation/etc-overlay-mutable.nix b/nixpkgs/nixos/tests/activation/etc-overlay-mutable.nix
index cfe7604fceb8..087c06408a71 100644
--- a/nixpkgs/nixos/tests/activation/etc-overlay-mutable.nix
+++ b/nixpkgs/nixos/tests/activation/etc-overlay-mutable.nix
@@ -18,13 +18,19 @@
   };
 
   testScript = ''
-    machine.succeed("findmnt --kernel --type overlay /etc")
-    machine.fail("stat /etc/newgen")
-    machine.succeed("echo -n 'mutable' > /etc/mutable")
+    with subtest("/etc is mounted as an overlay"):
+      machine.succeed("findmnt --kernel --type overlay /etc")
 
-    machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+    with subtest("switching to the same generation"):
+      machine.succeed("/run/current-system/bin/switch-to-configuration test")
 
-    assert machine.succeed("cat /etc/newgen") == "newgen"
-    assert machine.succeed("cat /etc/mutable") == "mutable"
+    with subtest("switching to a new generation"):
+      machine.fail("stat /etc/newgen")
+      machine.succeed("echo -n 'mutable' > /etc/mutable")
+
+      machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+
+      assert machine.succeed("cat /etc/newgen") == "newgen"
+      assert machine.succeed("cat /etc/mutable") == "mutable"
   '';
 }
diff --git a/nixpkgs/nixos/tests/all-tests.nix b/nixpkgs/nixos/tests/all-tests.nix
index 8193c3dfe840..1144a5611dcf 100644
--- a/nixpkgs/nixos/tests/all-tests.nix
+++ b/nixpkgs/nixos/tests/all-tests.nix
@@ -539,6 +539,7 @@ in {
   mongodb = handleTest ./mongodb.nix {};
   moodle = handleTest ./moodle.nix {};
   moonraker = handleTest ./moonraker.nix {};
+  morph-browser = handleTest ./morph-browser.nix { };
   morty = handleTest ./morty.nix {};
   mosquitto = handleTest ./mosquitto.nix {};
   moosefs = handleTest ./moosefs.nix {};
@@ -771,6 +772,7 @@ in {
   sanoid = handleTest ./sanoid.nix {};
   scaphandre = handleTest ./scaphandre.nix {};
   schleuder = handleTest ./schleuder.nix {};
+  scrutiny = handleTest ./scrutiny.nix {};
   sddm = handleTest ./sddm.nix {};
   seafile = handleTest ./seafile.nix {};
   searx = handleTest ./searx.nix {};
diff --git a/nixpkgs/nixos/tests/ccache.nix b/nixpkgs/nixos/tests/ccache.nix
new file mode 100644
index 000000000000..a97ae0501767
--- /dev/null
+++ b/nixpkgs/nixos/tests/ccache.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "ccache";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ehmry ];
+  };
+
+  nodes.machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+    environment.systemPackages = [ pkgs.hello ];
+    programs.ccache = {
+      enable = true;
+      packageNames = [ "hello" ];
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("nix-ccache --show-stats")
+      machine.succeed("hello")
+      machine.shutdown()
+    '';
+})
diff --git a/nixpkgs/nixos/tests/geoserver.nix b/nixpkgs/nixos/tests/geoserver.nix
index 7e5507a296ea..4f6f2b209d07 100644
--- a/nixpkgs/nixos/tests/geoserver.nix
+++ b/nixpkgs/nixos/tests/geoserver.nix
@@ -1,4 +1,18 @@
-{ pkgs, lib, ... }: {
+{ pkgs, lib, ... }:
+
+let
+  geoserver = pkgs.geoserver;
+  geoserverWithImporterExtension = pkgs.geoserver.withExtensions (ps: with ps; [ importer ]);
+
+  # Blacklisted extensions:
+  # - wps-jdbc needs a running (Postgres) db server.
+  blacklist = [ "wps-jdbc" ];
+
+  blacklistedToNull = n: v: if ! builtins.elem n blacklist then v else null;
+  getNonBlacklistedExtensionsAsList = ps: builtins.filter (x: x != null) (lib.attrsets.mapAttrsToList blacklistedToNull ps);
+  geoserverWithAllExtensions = pkgs.geoserver.withExtensions (ps: getNonBlacklistedExtensionsAsList ps);
+in
+{
 
   name = "geoserver";
   meta = {
@@ -9,16 +23,57 @@
     machine = { pkgs, ... }: {
       virtualisation.diskSize = 2 * 1024;
 
-      environment.systemPackages = [ pkgs.geoserver ];
+      environment.systemPackages = [
+        geoserver
+        geoserverWithImporterExtension
+        geoserverWithAllExtensions
+      ];
     };
   };
 
   testScript = ''
+    from contextlib import contextmanager
+
+    curl_cmd = "curl --fail --connect-timeout 2"
+    curl_cmd_rest = f"{curl_cmd} -u admin:geoserver -X GET"
+    base_url = "http://localhost:8080/geoserver"
+    log_file = "./log.txt"
+
+    @contextmanager
+    def running_geoserver(pkg):
+      try:
+        print(f"Launching geoserver from {pkg}...")
+        machine.execute(f"{pkg}/bin/geoserver-startup > {log_file} 2>&1 &")
+        machine.wait_until_succeeds(f"{curl_cmd} {base_url} 2>&1", timeout=60)
+        yield
+      finally:
+        # We need to wait a little bit to make sure the server has properly
+        # shut down before launching a new instance.
+        machine.execute(f"{pkg}/bin/geoserver-shutdown; sleep 1")
+
     start_all()
 
-    machine.execute("${pkgs.geoserver}/bin/geoserver-startup > /dev/null 2>&1 &")
-    machine.wait_until_succeeds("curl --fail --connect-timeout 2 http://localhost:8080/geoserver", timeout=60)
+    with running_geoserver("${geoserver}"):
+      machine.succeed(f"{curl_cmd} {base_url}/ows?service=WMS&version=1.3.0&request=GetCapabilities")
+
+      # No extensions yet.
+      machine.fail(f"{curl_cmd_rest} {base_url}/rest/imports")
+      machine.fail(f"{curl_cmd_rest} {base_url}/rest/monitor/requests.csv")
+
+
+    with running_geoserver("${geoserverWithImporterExtension}"):
+      machine.succeed(f"{curl_cmd_rest} {base_url}/rest/imports")
+      machine.fail(f"{curl_cmd_rest} {base_url}/rest/monitor/requests.csv")
+
+    with running_geoserver("${geoserverWithAllExtensions}"):
+      machine.succeed(f"{curl_cmd_rest} {base_url}/rest/imports")
+      machine.succeed(f"{curl_cmd_rest} {base_url}/rest/monitor/requests.csv")
+      _, stdout = machine.execute(f"cat {log_file}")
+      print(stdout.replace("\\n", "\n"))
+      assert "GDAL Native Library loaded" in stdout, "gdal"
+      assert "The turbo jpeg encoder is available for usage" in stdout, "libjpeg-turbo"
+      assert "org.geotools.imageio.netcdf.utilities.NetCDFUtilities" in stdout, "netcdf"
+      assert "Unable to load library 'netcdf'" not in stdout, "netcdf"
 
-    machine.succeed("curl --fail --connect-timeout 2 http://localhost:8080/geoserver/ows?service=WMS&version=1.3.0&request=GetCapabilities")
   '';
 }
diff --git a/nixpkgs/nixos/tests/incus/container.nix b/nixpkgs/nixos/tests/incus/container.nix
index 0e65cc1e1529..0f42d16f133d 100644
--- a/nixpkgs/nixos/tests/incus/container.nix
+++ b/nixpkgs/nixos/tests/incus/container.nix
@@ -1,11 +1,11 @@
-import ../make-test-python.nix ({ pkgs, lib, ... } :
+import ../make-test-python.nix ({ pkgs, lib, extra ? {}, ... } :
 
 let
   releases = import ../../release.nix {
     configuration = {
       # Building documentation makes the test take unnecessarily long:
       documentation.enable = lib.mkForce false;
-    };
+    } // extra;
   };
 
   container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
diff --git a/nixpkgs/nixos/tests/incus/default.nix b/nixpkgs/nixos/tests/incus/default.nix
index c8e53774599b..ff36fe9d6730 100644
--- a/nixpkgs/nixos/tests/incus/default.nix
+++ b/nixpkgs/nixos/tests/incus/default.nix
@@ -5,7 +5,11 @@
   handleTestOn,
 }:
 {
-  container = import ./container.nix { inherit system pkgs; };
+  container-old-init = import ./container.nix { inherit system pkgs; };
+  container-new-init = import ./container.nix { inherit system pkgs; extra = {
+    # Enable new systemd init
+    boot.initrd.systemd.enable = true;
+  }; };
   lxd-to-incus = import ./lxd-to-incus.nix { inherit system pkgs; };
   preseed = import ./preseed.nix { inherit system pkgs; };
   socket-activated = import ./socket-activated.nix { inherit system pkgs; };
diff --git a/nixpkgs/nixos/tests/installer.nix b/nixpkgs/nixos/tests/installer.nix
index 7576fae41f83..b6cb6a0c6d45 100644
--- a/nixpkgs/nixos/tests/installer.nix
+++ b/nixpkgs/nixos/tests/installer.nix
@@ -526,8 +526,7 @@ let
             curl
           ]
           ++ optionals (bootLoader == "grub") (let
-            zfsSupport = lib.any (x: x == "zfs")
-              (extraInstallerConfig.boot.supportedFilesystems or []);
+            zfsSupport = extraInstallerConfig.boot.supportedFilesystems.zfs or false;
           in [
             (pkgs.grub2.override { inherit zfsSupport; })
             (pkgs.grub2_efi.override { inherit zfsSupport; })
diff --git a/nixpkgs/nixos/tests/matomo.nix b/nixpkgs/nixos/tests/matomo.nix
index 7dbef63136aa..130f3dd8485a 100644
--- a/nixpkgs/nixos/tests/matomo.nix
+++ b/nixpkgs/nixos/tests/matomo.nix
@@ -47,4 +47,8 @@ in {
     name = "matomo-beta";
     meta.maintainers = with maintainers; [ florianjacob kiwi mmilata twey boozedog ];
   };
+  matomo_5 = matomoTest pkgs.matomo_5 // {
+    name = "matomo-5";
+    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata twey boozedog ] ++ lib.teams.flyingcircus.members;
+  };
 }
diff --git a/nixpkgs/nixos/tests/morph-browser.nix b/nixpkgs/nixos/tests/morph-browser.nix
new file mode 100644
index 000000000000..859e6bb47646
--- /dev/null
+++ b/nixpkgs/nixos/tests/morph-browser.nix
@@ -0,0 +1,53 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "morph-browser-standalone";
+  meta.maintainers = lib.teams.lomiri.members;
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+
+    environment = {
+      systemPackages = with pkgs.lomiri; [
+        suru-icon-theme
+        morph-browser
+      ];
+      variables = {
+        UITK_ICON_THEME = "suru";
+      };
+    };
+
+    i18n.supportedLocales = [ "all" ];
+
+    fonts.packages = with pkgs; [
+      # Intended font & helps with OCR
+      ubuntu_font_family
+    ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+
+      with subtest("morph browser launches"):
+          machine.execute("morph-browser >&2 &")
+          machine.wait_for_text(r"Web Browser|New|sites|Bookmarks")
+          machine.screenshot("morph_open")
+
+      with subtest("morph browser displays HTML"):
+          machine.send_chars("file://${pkgs.valgrind.doc}/share/doc/valgrind/html/index.html\n")
+          machine.wait_for_text("Valgrind Documentation")
+          machine.screenshot("morph_htmlcontent")
+
+      machine.succeed("pkill -f morph-browser")
+
+      with subtest("morph browser localisation works"):
+          machine.execute("env LANG=de_DE.UTF-8 morph-browser >&2 &")
+          machine.wait_for_text(r"Web-Browser|Neuer|Seiten|Lesezeichen")
+          machine.screenshot("morph_localised")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix b/nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix
index 3c090f0d3c3b..06afc589403d 100644
--- a/nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix
+++ b/nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix
@@ -39,7 +39,7 @@ in {
         };
         extraAppsEnable = true;
         extraApps = {
-          inherit (pkgs."nextcloud${lib.versions.major config.services.nextcloud.package.version}Packages".apps) notify_push;
+          inherit (pkgs."nextcloud${lib.versions.major config.services.nextcloud.package.version}Packages".apps) notify_push notes;
         };
         settings.trusted_proxies = [ "::1" ];
       };
@@ -84,7 +84,7 @@ in {
         "${withRcloneEnv} ${copySharedFile}"
     )
     client.wait_for_unit("multi-user.target")
-    client.execute("${pkgs.nextcloud-notify_push.passthru.test_client}/bin/test_client http://nextcloud ${adminuser} ${adminpass} >&2 &")
+    client.execute("${pkgs.lib.getExe pkgs.nextcloud-notify_push.passthru.test_client} http://nextcloud ${adminuser} ${adminpass} >&2 &")
     client.succeed(
         "${withRcloneEnv} ${diffSharedFile}"
     )
@@ -92,5 +92,7 @@ in {
 
     # redis cache should not be empty
     nextcloud.fail('test "[]" = "$(redis-cli --json KEYS "*")"')
+
+    nextcloud.fail("curl -f http://nextcloud/nix-apps/notes/lib/AppInfo/Application.php")
   '';
 })) args
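The notify_push test client is now resolved with lib.getExe, which builds the
path to a package's main executable from meta.mainProgram (falling back to the
package name) instead of hard-coding the bin/ path. A minimal sketch, assuming
the package sets meta.mainProgram:

    # nix repl sketch
    let pkgs = import <nixpkgs> { }; in
    pkgs.lib.getExe pkgs.hello
    # => "/nix/store/…-hello-…/bin/hello"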
diff --git a/nixpkgs/nixos/tests/power-profiles-daemon.nix b/nixpkgs/nixos/tests/power-profiles-daemon.nix
index c887cde4b829..8a54d8e8bab8 100644
--- a/nixpkgs/nixos/tests/power-profiles-daemon.nix
+++ b/nixpkgs/nixos/tests/power-profiles-daemon.nix
@@ -8,22 +8,22 @@ import ./make-test-python.nix ({ pkgs, ... }:
   nodes.machine = { pkgs, ... }: {
     security.polkit.enable = true;
     services.power-profiles-daemon.enable = true;
-    environment.systemPackages = [ pkgs.glib ];
+    environment.systemPackages = [ pkgs.glib pkgs.power-profiles-daemon ];
   };
 
   testScript = ''
     def get_profile():
         return machine.succeed(
-            """gdbus call --system --dest net.hadess.PowerProfiles --object-path /net/hadess/PowerProfiles \
-    --method org.freedesktop.DBus.Properties.Get 'net.hadess.PowerProfiles' 'ActiveProfile'
+            """gdbus call --system --dest org.freedesktop.UPower.PowerProfiles --object-path /org/freedesktop/UPower/PowerProfiles \
+    --method org.freedesktop.DBus.Properties.Get 'org.freedesktop.UPower.PowerProfiles' 'ActiveProfile'
     """
         )
 
 
     def set_profile(profile):
         return machine.succeed(
-            """gdbus call --system --dest net.hadess.PowerProfiles --object-path /net/hadess/PowerProfiles \
-    --method org.freedesktop.DBus.Properties.Set 'net.hadess.PowerProfiles' 'ActiveProfile' "<'{profile}'>"
+            """gdbus call --system --dest org.freedesktop.UPower.PowerProfiles --object-path /org/freedesktop/UPower/PowerProfiles \
+    --method org.freedesktop.DBus.Properties.Set 'org.freedesktop.UPower.PowerProfiles' 'ActiveProfile' "<'{profile}'>"
     """.format(
                 profile=profile
             )
@@ -42,5 +42,16 @@ import ./make-test-python.nix ({ pkgs, ... }:
     profile = get_profile()
     if not "balanced" in profile:
         raise Exception("Unable to set balanced profile")
+
+    # test the powerprofilesctl CLI
+    machine.succeed("powerprofilesctl set power-saver")
+    profile = get_profile()
+    if not "power-saver" in profile:
+        raise Exception("Unable to set power-saver profile with powerprofilectl")
+
+    machine.succeed("powerprofilesctl set balanced")
+    profile = get_profile()
+    if not "balanced" in profile:
+        raise Exception("Unable to set balanced profile with powerprofilectl")
   '';
 })
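Background for the hunk above: recent power-profiles-daemon releases moved the
daemon to the org.freedesktop.UPower.PowerProfiles bus name and object path, and
the test now also adds the package to systemPackages so the powerprofilesctl CLI
is on PATH. A sketch of the same round trip through the CLI, assuming its usual
get/set subcommands:

    # Nix test fragment, sketch only
    testScript = ''
      machine.succeed("powerprofilesctl set power-saver")
      assert "power-saver" in machine.succeed("powerprofilesctl get")
      machine.succeed("powerprofilesctl set balanced")
      assert "balanced" in machine.succeed("powerprofilesctl get")
    '';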
diff --git a/nixpkgs/nixos/tests/scrutiny.nix b/nixpkgs/nixos/tests/scrutiny.nix
new file mode 100644
index 000000000000..33160a6b3088
--- /dev/null
+++ b/nixpkgs/nixos/tests/scrutiny.nix
@@ -0,0 +1,70 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "scrutiny";
+  meta.maintainers = with lib.maintainers; [ jnsgruk ];
+
+  nodes = {
+    machine = { self, pkgs, lib, ... }: {
+      services = {
+        scrutiny.enable = true;
+        scrutiny.collector.enable = true;
+      };
+
+      environment.systemPackages =
+        let
+          seleniumScript = pkgs.writers.writePython3Bin "selenium-script"
+            {
+              libraries = with pkgs.python3Packages; [ selenium ];
+            } ''
+            from selenium import webdriver
+            from selenium.webdriver.common.by import By
+            from selenium.webdriver.firefox.options import Options
+            from selenium.webdriver.support.ui import WebDriverWait
+            from selenium.webdriver.support import expected_conditions as EC
+
+            options = Options()
+            options.add_argument("--headless")
+            service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}")  # noqa: E501
+
+            driver = webdriver.Firefox(options=options, service=service)
+            driver.implicitly_wait(10)
+            driver.get("http://localhost:8080/web/dashboard")
+
+            wait = WebDriverWait(driver, 10).until(
+              EC.text_to_be_present_in_element(
+                (By.TAG_NAME, "body"), "Drive health at a glance")
+            )
+
+            body_text = driver.find_element(By.TAG_NAME, "body").text
+            assert "Temperature history for each device" in body_text
+
+            driver.close()
+          '';
+        in
+        with pkgs; [ curl firefox-unwrapped geckodriver seleniumScript ];
+    };
+  };
+  # Check that the service starts and its web UI renders correctly:
+  testScript = ''
+    start_all()
+
+    # Wait for InfluxDB to be available
+    machine.wait_for_unit("influxdb2")
+    machine.wait_for_open_port(8086)
+
+    # Wait for Scrutiny to be available
+    machine.wait_for_unit("scrutiny")
+    machine.wait_for_open_port(8080)
+
+    # Ensure the API responds as we expect
+    output = machine.succeed("curl localhost:8080/api/health")
+    assert output == '{"success":true}'
+
+    # Start the collector service to send some metrics
+    machine.succeed("systemctl start scrutiny-collector.service")
+
+    # Ensure the application is actually rendered by the JavaScript frontend
+    machine.succeed("PYTHONUNBUFFERED=1 selenium-script")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-boot.nix b/nixpkgs/nixos/tests/systemd-boot.nix
index c0b37a230df0..ce3245f3d862 100644
--- a/nixpkgs/nixos/tests/systemd-boot.nix
+++ b/nixpkgs/nixos/tests/systemd-boot.nix
@@ -39,6 +39,32 @@ in
     '';
   };
 
+  # Test that systemd-boot works with secure boot
+  secureBoot = makeTest {
+    name = "systemd-boot-secure-boot";
+
+    nodes.machine = {
+      imports = [ common ];
+      environment.systemPackages = [ pkgs.sbctl ];
+      virtualisation.useSecureBoot = true;
+    };
+
+    testScript = ''
+      machine.start(allow_reboot=True)
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("sbctl create-keys")
+      machine.succeed("sbctl enroll-keys --yes-this-might-brick-my-machine")
+      machine.succeed('sbctl sign /boot/EFI/systemd/systemd-bootx64.efi')
+      machine.succeed('sbctl sign /boot/EFI/BOOT/BOOTX64.EFI')
+      machine.succeed('sbctl sign /boot/EFI/nixos/*bzImage.efi')
+
+      machine.reboot()
+
+      assert "Secure Boot: enabled (user)" in machine.succeed("bootctl status")
+    '';
+  };
+
   # Check that specialisations create corresponding boot entries.
   specialisation = makeTest {
     name = "systemd-boot-specialisation";