author    Alyssa Ross <hi@alyssa.is>    2023-08-26 09:19:25 +0000
committer Alyssa Ross <hi@alyssa.is>    2023-08-26 09:19:25 +0000
commit    55abc327b49b4097e48c916e40803caa8cf46e8f (patch)
tree      1c0420ab3fb21d9485460b912f1c3eae18781871 /nixpkgs/nixos
parent    7936cf821dccc1eaade44b852db09d03fae8e5f3 (diff)
parent    18324978d632ffc55ef1d928e81630c620f4f447 (diff)
Merge branch 'nixos-unstable' of https://github.com/NixOS/nixpkgs
Conflicts:
	nixpkgs/pkgs/build-support/go/module.nix
Diffstat (limited to 'nixpkgs/nixos')
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md |   4
-rw-r--r--  nixpkgs/nixos/modules/config/zram.nix |  49
-rw-r--r--  nixpkgs/nixos/modules/image/amend-repart-definitions.py |   4
-rw-r--r--  nixpkgs/nixos/modules/module-list.nix |   2
-rw-r--r--  nixpkgs/nixos/modules/programs/zsh/zsh.nix |  13
-rw-r--r--  nixpkgs/nixos/modules/services/databases/influxdb2.nix | 452
-rw-r--r--  nixpkgs/nixos/modules/services/editors/emacs.nix |  17
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix |  66
-rw-r--r--  nixpkgs/nixos/modules/services/networking/tailscale.nix |   2
-rw-r--r--  nixpkgs/nixos/modules/services/networking/twingate.nix |   2
-rw-r--r--  nixpkgs/nixos/modules/services/system/zram-generator.nix |  38
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/netbox.nix |  98
-rw-r--r--  nixpkgs/nixos/modules/services/web-servers/caddy/default.nix |  54
-rw-r--r--  nixpkgs/nixos/modules/services/x11/window-managers/dwm.nix |   1
-rw-r--r--  nixpkgs/nixos/modules/system/boot/binfmt.nix |  10
-rw-r--r--  nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl |  59
-rw-r--r--  nixpkgs/nixos/tests/all-tests.nix |   8
-rw-r--r--  nixpkgs/nixos/tests/caddy.nix |  22
-rw-r--r--  nixpkgs/nixos/tests/common/lxd/config.yaml |  24
-rw-r--r--  nixpkgs/nixos/tests/hddfancontrol.nix |  44
-rw-r--r--  nixpkgs/nixos/tests/influxdb2.nix | 193
-rw-r--r--  nixpkgs/nixos/tests/listmonk.nix |  23
-rw-r--r--  nixpkgs/nixos/tests/lxd-image-server.nix |   6
-rw-r--r--  nixpkgs/nixos/tests/lxd/container.nix (renamed from nixpkgs/nixos/tests/lxd.nix) |  38
-rw-r--r--  nixpkgs/nixos/tests/lxd/default.nix |   9
-rw-r--r--  nixpkgs/nixos/tests/lxd/nftables.nix (renamed from nixpkgs/nixos/tests/lxd-nftables.nix) |   2
-rw-r--r--  nixpkgs/nixos/tests/lxd/ui.nix (renamed from nixpkgs/nixos/tests/lxd-ui.nix) |   2
-rw-r--r--  nixpkgs/nixos/tests/os-prober.nix |   1
-rw-r--r--  nixpkgs/nixos/tests/virtualbox.nix |   2
-rw-r--r--  nixpkgs/nixos/tests/web-apps/netbox-upgrade.nix |  85
30 files changed, 1075 insertions, 255 deletions
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md
index 825b1c5bd407..623576ce4ff2 100644
--- a/nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md
@@ -20,6 +20,8 @@
 
 - [mautrix-whatsapp](https://docs.mau.fi/bridges/go/whatsapp/index.html) A Matrix-WhatsApp puppeting bridge
 
+- [hddfancontrol](https://github.com/desbma/hddfancontrol), a service to regulate fan speeds based on hard drive temperature. Available as [services.hddfancontrol](#opt-services.hddfancontrol.enable).
+
 - [GoToSocial](https://gotosocial.org/), an ActivityPub social network server, written in Golang. Available as [services.gotosocial](#opt-services.gotosocial.enable).
 
 - [Typesense](https://github.com/typesense/typesense), a fast, typo-tolerant search engine for building delightful search experiences. Available as [services.typesense](#opt-services.typesense.enable).
@@ -201,6 +203,8 @@ The module update takes care of the new config syntax and the data itself (user
 
 - `programs.gnupg.agent.pinentryFlavor` is now set in `/etc/gnupg/gpg-agent.conf`, and will no longer take precedence over a `pinentry-program` set in `~/.gnupg/gpg-agent.conf`.
 
+- `services.influxdb2` now supports automatic initial setup and provisioning of users, organizations, buckets and authentication tokens; see [#249502](https://github.com/NixOS/nixpkgs/pull/249502) for more details.
+
 - `wrapHelm` now exposes `passthru.pluginsDir` which can be passed to `helmfile`. For convenience, a top-level package `helmfile-wrapped` has been added, which inherits `passthru.pluginsDir` from `kubernetes-helm-wrapped`. See [#217768](https://github.com/NixOS/nixpkgs/issues/217768) for details.
 
 - `boot.initrd.network.udhcp.enable` allows control over dhcp during stage 1 regardless of what `networking.useDHCP` is set to.
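As a sketch of the provisioning interface described above (the organization, bucket and secret-file paths here are hypothetical, chosen for illustration), the new options can be used like this:

    services.influxdb2 = {
      enable = true;
      provision = {
        enable = true;
        initialSetup = {
          organization = "main";
          bucket = "default";
          passwordFile = "/run/secrets/influxdb-admin-password"; # hypothetical secret path
          tokenFile = "/run/secrets/influxdb-admin-token";       # hypothetical secret path
        };
        organizations.myorg = {
          buckets.mybucket.retention = 31536000; # one year, in seconds
          auths.mytoken = {
            readBuckets = [ "mybucket" ];
            writeBuckets = [ "mybucket" ];
            tokenFile = "/run/secrets/mytoken"; # hypothetical secret path
          };
        };
      };
    };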
diff --git a/nixpkgs/nixos/modules/config/zram.nix b/nixpkgs/nixos/modules/config/zram.nix
index 991387ea9b2b..ec8b4ed6e931 100644
--- a/nixpkgs/nixos/modules/config/zram.nix
+++ b/nixpkgs/nixos/modules/config/zram.nix
@@ -105,36 +105,25 @@ in
       }
     ];
 
-
-    system.requiredKernelConfig = with config.lib.kernelConfig; [
-      (isModule "ZRAM")
-    ];
-
-    # Disabling this for the moment, as it would create and mkswap devices twice,
-    # once in stage 2 boot, and again when the zram-reloader service starts.
-    # boot.kernelModules = [ "zram" ];
-
-    systemd.packages = [ pkgs.zram-generator ];
-    systemd.services."systemd-zram-setup@".path = [ pkgs.util-linux ]; # for mkswap
-
-    environment.etc."systemd/zram-generator.conf".source =
-      (pkgs.formats.ini { }).generate "zram-generator.conf" (lib.listToAttrs
-        (builtins.map
-          (dev: {
-            name = dev;
-            value =
-              let
-                size = "${toString cfg.memoryPercent} / 100 * ram";
-              in
-              {
-                zram-size = if cfg.memoryMax != null then "min(${size}, ${toString cfg.memoryMax} / 1024 / 1024)" else size;
-                compression-algorithm = cfg.algorithm;
-                swap-priority = cfg.priority;
-              } // lib.optionalAttrs (cfg.writebackDevice != null) {
-                writeback-device = cfg.writebackDevice;
-              };
-          })
-          devices));
+    services.zram-generator.enable = true;
+
+    services.zram-generator.settings = lib.listToAttrs
+      (builtins.map
+        (dev: {
+          name = dev;
+          value =
+            let
+              size = "${toString cfg.memoryPercent} / 100 * ram";
+            in
+            {
+              zram-size = if cfg.memoryMax != null then "min(${size}, ${toString cfg.memoryMax} / 1024 / 1024)" else size;
+              compression-algorithm = cfg.algorithm;
+              swap-priority = cfg.priority;
+            } // lib.optionalAttrs (cfg.writebackDevice != null) {
+              writeback-device = cfg.writebackDevice;
+            };
+        })
+        devices);
 
   };
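The rewrite above simply forwards the existing `zramSwap` options to `services.zram-generator`. As a sketch of the mapping, a configuration with `memoryPercent = 50` and the usual defaults (assumed here: `zstd` compression, swap priority 5) yields roughly:

    services.zram-generator.enable = true;
    services.zram-generator.settings.zram0 = {
      zram-size = "50 / 100 * ram";
      compression-algorithm = "zstd";
      swap-priority = 5;
    };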
 
diff --git a/nixpkgs/nixos/modules/image/amend-repart-definitions.py b/nixpkgs/nixos/modules/image/amend-repart-definitions.py
index 52f10303eb5e..fa9b1544ae85 100644
--- a/nixpkgs/nixos/modules/image/amend-repart-definitions.py
+++ b/nixpkgs/nixos/modules/image/amend-repart-definitions.py
@@ -53,7 +53,7 @@ def add_closure_to_definition(
 
             source = Path(line.strip())
             target = str(source.relative_to("/nix/store/"))
-            target = f":{target}" if strip_nix_store_prefix else ""
+            target = f":/{target}" if strip_nix_store_prefix else ""
 
             copy_files_lines.append(f"CopyFiles={source}{target}\n")
 
@@ -102,7 +102,7 @@ def main() -> None:
         add_contents_to_definition(definition, contents)
 
         closure = config.get("closure")
-        strip_nix_store_prefix = config.get("stripStorePaths")
+        strip_nix_store_prefix = config.get("stripNixStorePrefix")
         add_closure_to_definition(definition, closure, strip_nix_store_prefix)
 
     print(target_dir.absolute())
diff --git a/nixpkgs/nixos/modules/module-list.nix b/nixpkgs/nixos/modules/module-list.nix
index f05e28cd14bc..aec12ab21a75 100644
--- a/nixpkgs/nixos/modules/module-list.nix
+++ b/nixpkgs/nixos/modules/module-list.nix
@@ -506,6 +506,7 @@
   ./services/hardware/fancontrol.nix
   ./services/hardware/freefall.nix
   ./services/hardware/fwupd.nix
+  ./services/hardware/hddfancontrol.nix
   ./services/hardware/illum.nix
   ./services/hardware/interception-tools.nix
   ./services/hardware/irqbalance.nix
@@ -1172,6 +1173,7 @@
   ./services/system/self-deploy.nix
   ./services/system/systembus-notify.nix
   ./services/system/uptimed.nix
+  ./services/system/zram-generator.nix
   ./services/torrent/deluge.nix
   ./services/torrent/flexget.nix
   ./services/torrent/magnetico.nix
diff --git a/nixpkgs/nixos/modules/programs/zsh/zsh.nix b/nixpkgs/nixos/modules/programs/zsh/zsh.nix
index 6bb21cb3ef66..cad639f299c8 100644
--- a/nixpkgs/nixos/modules/programs/zsh/zsh.nix
+++ b/nixpkgs/nixos/modules/programs/zsh/zsh.nix
@@ -159,6 +159,14 @@ in
         type = types.bool;
       };
 
+      enableLsColors = mkOption {
+        default = true;
+        description = lib.mdDoc ''
+          Enable extra colors in directory listings (used by `ls` and `tree`).
+        '';
+        type = types.bool;
+      };
+
     };
 
   };
@@ -263,6 +271,11 @@ in
 
         ${cfg.interactiveShellInit}
 
+        ${optionalString cfg.enableLsColors ''
+          # Extra colors for directory listings.
+          eval "$(${pkgs.coreutils}/bin/dircolors -b)"
+        ''}
+
         # Setup aliases.
         ${zshAliases}
 
diff --git a/nixpkgs/nixos/modules/services/databases/influxdb2.nix b/nixpkgs/nixos/modules/services/databases/influxdb2.nix
index 329533b35dc8..3740cd01b5dc 100644
--- a/nixpkgs/nixos/modules/services/databases/influxdb2.nix
+++ b/nixpkgs/nixos/modules/services/databases/influxdb2.nix
@@ -3,34 +3,291 @@
 let
   inherit
     (lib)
+    any
+    attrNames
+    attrValues
+    count
     escapeShellArg
+    filterAttrs
+    flatten
+    flip
+    getExe
     hasAttr
+    hasInfix
+    listToAttrs
     literalExpression
+    mapAttrsToList
+    mdDoc
     mkEnableOption
     mkIf
     mkOption
+    nameValuePair
+    optional
+    subtractLists
     types
+    unique
     ;
 
   format = pkgs.formats.json { };
   cfg = config.services.influxdb2;
   configFile = format.generate "config.json" cfg.settings;
+
+  validPermissions = [
+    "authorizations"
+    "buckets"
+    "dashboards"
+    "orgs"
+    "tasks"
+    "telegrafs"
+    "users"
+    "variables"
+    "secrets"
+    "labels"
+    "views"
+    "documents"
+    "notificationRules"
+    "notificationEndpoints"
+    "checks"
+    "dbrp"
+    "annotations"
+    "sources"
+    "scrapers"
+    "notebooks"
+    "remotes"
+    "replications"
+  ];
+
+  # Determines whether at least one active api token is defined
+  anyAuthDefined =
+    flip any (attrValues cfg.provision.organizations)
+    (o: o.present && flip any (attrValues o.auths)
+    (a: a.present && a.tokenFile != null));
+
+  provisionState = pkgs.writeText "provision_state.json" (builtins.toJSON {
+    inherit (cfg.provision) organizations users;
+  });
+
+  provisioningScript = pkgs.writeShellScript "post-start-provision" ''
+    set -euo pipefail
+    export INFLUX_HOST="http://"${escapeShellArg (
+      if ! hasAttr "http-bind-address" cfg.settings
+        || hasInfix "0.0.0.0" cfg.settings.http-bind-address
+      then "localhost:8086"
+      else cfg.settings.http-bind-address
+    )}
+
+    # Wait for the influxdb server to come online
+    count=0
+    while ! influx ping &>/dev/null; do
+      if [ "$count" -eq 300 ]; then
+        echo "Tried for 30 seconds, giving up..."
+        exit 1
+      fi
+
+      if ! kill -0 "$MAINPID"; then
+        echo "Main server died, giving up..."
+        exit 1
+      fi
+
+      sleep 0.1
+      count=$((count + 1))
+    done
+
+    # Do the initial database setup. Pass /dev/null as configs-path to
+    # avoid saving the token as the active config.
+    if test -e "$STATE_DIRECTORY/.first_startup"; then
+      influx setup \
+        --configs-path /dev/null \
+        --org ${escapeShellArg cfg.provision.initialSetup.organization} \
+        --bucket ${escapeShellArg cfg.provision.initialSetup.bucket} \
+        --username ${escapeShellArg cfg.provision.initialSetup.username} \
+        --password "$(< "$CREDENTIALS_DIRECTORY/admin-password")" \
+        --token "$(< "$CREDENTIALS_DIRECTORY/admin-token")" \
+        --retention ${toString cfg.provision.initialSetup.retention}s \
+        --force >/dev/null
+
+      rm -f "$STATE_DIRECTORY/.first_startup"
+    fi
+
+    provision_result=$(${getExe pkgs.influxdb2-provision} ${provisionState} "$INFLUX_HOST" "$(< "$CREDENTIALS_DIRECTORY/admin-token")")
+    if [[ "$(jq '[.auths[] | select(.action == "created")] | length' <<< "$provision_result")" -gt 0 ]]; then
+      echo "Created at least one new token, queueing service restart so we can manipulate secrets"
+      touch "$STATE_DIRECTORY/.needs_restart"
+    fi
+  '';
+
+  restarterScript = pkgs.writeShellScript "post-start-restarter" ''
+    set -euo pipefail
+    if test -e "$STATE_DIRECTORY/.needs_restart"; then
+      rm -f "$STATE_DIRECTORY/.needs_restart"
+      /run/current-system/systemd/bin/systemctl restart influxdb2
+    fi
+  '';
+
+  organizationSubmodule = types.submodule (organizationSubmod: let
+    org = organizationSubmod.config._module.args.name;
+  in {
+    options = {
+      present = mkOption {
+        description = mdDoc "Whether to ensure that this organization is present or absent.";
+        type = types.bool;
+        default = true;
+      };
+
+      description = mkOption {
+        description = mdDoc "Optional description for the organization.";
+        default = null;
+        type = types.nullOr types.str;
+      };
+
+      buckets = mkOption {
+        description = mdDoc "Buckets to provision in this organization.";
+        default = {};
+        type = types.attrsOf (types.submodule (bucketSubmod: let
+          bucket = bucketSubmod.config._module.args.name;
+        in {
+          options = {
+            present = mkOption {
+              description = mdDoc "Whether to ensure that this bucket is present or absent.";
+              type = types.bool;
+              default = true;
+            };
+
+            description = mkOption {
+              description = mdDoc "Optional description for the bucket.";
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            retention = mkOption {
+              type = types.ints.unsigned;
+              default = 0;
+              description = mdDoc "The duration in seconds for which the bucket will retain data (0 is infinite).";
+            };
+          };
+        }));
+      };
+
+      auths = mkOption {
+        description = mdDoc "API tokens to provision for the user in this organization.";
+        default = {};
+        type = types.attrsOf (types.submodule (authSubmod: let
+          auth = authSubmod.config._module.args.name;
+        in {
+          options = {
+            id = mkOption {
+              description = mdDoc "A unique identifier for this authentication token. Since influx doesn't store names for tokens, this will be hashed and appended to the description to identify the token.";
+              readOnly = true;
+              default = builtins.substring 0 32 (builtins.hashString "sha256" "${org}:${auth}");
+              defaultText = "<a hash derived from org and name>";
+              type = types.str;
+            };
+
+            present = mkOption {
+              description = mdDoc "Whether to ensure that this user is present or absent.";
+              type = types.bool;
+              default = true;
+            };
+
+            description = mkOption {
+              description = ''
+                Optional description for the API token.
+                Note that the actual token will always be created with a description regardless
+                of whether this is given or not: the name plus a unique suffix is always added
+                so the token can later be identified and tracked as already created.
+              '';
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            tokenFile = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = mdDoc "The token value. If not given, influx will automatically generate one.";
+            };
+
+            operator = mkOption {
+              description = mdDoc "Grants all permissions in all organizations.";
+              default = false;
+              type = types.bool;
+            };
+
+            allAccess = mkOption {
+              description = mdDoc "Grants all permissions in the associated organization.";
+              default = false;
+              type = types.bool;
+            };
+
+            readPermissions = mkOption {
+              description = mdDoc ''
+                The read permissions to include for this token. Access is usually granted only
+                for resources in the associated organization.
+
+                Available permissions are `authorizations`, `buckets`, `dashboards`,
+                `orgs`, `tasks`, `telegrafs`, `users`, `variables`, `secrets`, `labels`, `views`,
+                `documents`, `notificationRules`, `notificationEndpoints`, `checks`, `dbrp`,
+                `annotations`, `sources`, `scrapers`, `notebooks`, `remotes`, `replications`.
+
+                Refer to `influx auth create --help` for a full list with descriptions.
+
+                `buckets` grants read access to all associated buckets. Use `readBuckets` to define
+                more granular access permissions.
+              '';
+              default = [];
+              type = types.listOf (types.enum validPermissions);
+            };
+
+            writePermissions = mkOption {
+              description = mdDoc ''
+                The write permissions to include for this token. Access is usually granted only
+                for resources in the associated organization.
+
+                Available permissions are `authorizations`, `buckets`, `dashboards`,
+                `orgs`, `tasks`, `telegrafs`, `users`, `variables`, `secrets`, `labels`, `views`,
+                `documents`, `notificationRules`, `notificationEndpoints`, `checks`, `dbrp`,
+                `annotations`, `sources`, `scrapers`, `notebooks`, `remotes`, `replications`.
+
+                Refer to `influx auth create --help` for a full list with descriptions.
+
+                `buckets` grants write access to all associated buckets. Use `writeBuckets` to define
+                more granular access permissions.
+              '';
+              default = [];
+              type = types.listOf (types.enum validPermissions);
+            };
+
+            readBuckets = mkOption {
+              description = mdDoc "The organization's buckets which should be allowed to be read";
+              default = [];
+              type = types.listOf types.str;
+            };
+
+            writeBuckets = mkOption {
+              description = mdDoc "The organization's buckets which should be allowed to be written";
+              default = [];
+              type = types.listOf types.str;
+            };
+          };
+        }));
+      };
+    };
+  });
 in
 {
   options = {
     services.influxdb2 = {
-      enable = mkEnableOption (lib.mdDoc "the influxdb2 server");
+      enable = mkEnableOption (mdDoc "the influxdb2 server");
 
       package = mkOption {
         default = pkgs.influxdb2-server;
         defaultText = literalExpression "pkgs.influxdb2";
-        description = lib.mdDoc "influxdb2 derivation to use.";
+        description = mdDoc "influxdb2 derivation to use.";
         type = types.package;
       };
 
       settings = mkOption {
         default = { };
-        description = lib.mdDoc ''configuration options for influxdb2, see <https://docs.influxdata.com/influxdb/v2.0/reference/config-options> for details.'';
+        description = mdDoc ''configuration options for influxdb2, see <https://docs.influxdata.com/influxdb/v2.0/reference/config-options> for details.'';
         type = format.type;
       };
 
@@ -41,52 +298,135 @@ in
           organization = mkOption {
             type = types.str;
             example = "main";
-            description = "Primary organization name";
+            description = mdDoc "Primary organization name";
           };
 
           bucket = mkOption {
             type = types.str;
             example = "example";
-            description = "Primary bucket name";
+            description = mdDoc "Primary bucket name";
           };
 
           username = mkOption {
             type = types.str;
             default = "admin";
-            description = "Primary username";
+            description = mdDoc "Primary username";
           };
 
           retention = mkOption {
-            type = types.str;
-            default = "0";
-            description = ''
-              The duration for which the bucket will retain data (0 is infinite).
-              Accepted units are `ns` (nanoseconds), `us` or `µs` (microseconds), `ms` (milliseconds),
-              `s` (seconds), `m` (minutes), `h` (hours), `d` (days) and `w` (weeks).
-            '';
+            type = types.ints.unsigned;
+            default = 0;
+            description = mdDoc "The duration in seconds for which the bucket will retain data (0 is infinite).";
           };
 
           passwordFile = mkOption {
             type = types.path;
-            description = "Password for primary user. Don't use a file from the nix store!";
+            description = mdDoc "Password for primary user. Don't use a file from the nix store!";
           };
 
           tokenFile = mkOption {
             type = types.path;
-            description = "API Token to set for the admin user. Don't use a file from the nix store!";
+            description = mdDoc "API Token to set for the admin user. Don't use a file from the nix store!";
           };
         };
+
+        organizations = mkOption {
+          description = mdDoc "Organizations to provision.";
+          example = literalExpression ''
+            {
+              myorg = {
+                description = "My organization";
+                buckets.mybucket = {
+                  description = "My bucket";
+                  retention = 31536000; # 1 year
+                };
+                auths.mytoken = {
+                  readBuckets = ["mybucket"];
+                  tokenFile = "/run/secrets/mytoken";
+                };
+              };
+            }
+          '';
+          default = {};
+          type = types.attrsOf organizationSubmodule;
+        };
+
+        users = mkOption {
+          description = mdDoc "Users to provision.";
+          default = {};
+          example = literalExpression ''
+            {
+              # admin = {}; /* The initialSetup.username will automatically be added. */
+              myuser.passwordFile = "/run/secrets/myuser_password";
+            }
+          '';
+          type = types.attrsOf (types.submodule (userSubmod: let
+            user = userSubmod.config._module.args.name;
+            org = userSubmod.config.org;
+          in {
+            options = {
+              present = mkOption {
+                description = mdDoc "Whether to ensure that this user is present or absent.";
+                type = types.bool;
+                default = true;
+              };
+
+              passwordFile = mkOption {
+                description = mdDoc "Password for the user. If unset, the user will not be able to log in until a password is set by an operator! Don't use a file from the nix store!";
+                default = null;
+                type = types.nullOr types.path;
+              };
+            };
+          }));
+        };
       };
     };
   };
 
   config = mkIf cfg.enable {
-    assertions = [
-      {
-        assertion = !(hasAttr "bolt-path" cfg.settings) && !(hasAttr "engine-path" cfg.settings);
-        message = "services.influxdb2.config: bolt-path and engine-path should not be set as they are managed by systemd";
-      }
-    ];
+    assertions =
+      [
+        {
+          assertion = !(hasAttr "bolt-path" cfg.settings) && !(hasAttr "engine-path" cfg.settings);
+          message = "services.influxdb2.config: bolt-path and engine-path should not be set as they are managed by systemd";
+        }
+      ]
+      ++ flatten (flip mapAttrsToList cfg.provision.organizations (orgName: org:
+        flip mapAttrsToList org.auths (authName: auth:
+          [
+            {
+              assertion = 1 == count (x: x) [
+                auth.operator
+                auth.allAccess
+                (auth.readPermissions != []
+                  || auth.writePermissions != []
+                  || auth.readBuckets != []
+                  || auth.writeBuckets != [])
+              ];
+              message = "influxdb2: provision.organizations.${orgName}.auths.${authName}: The `operator` and `allAccess` options are mutually exclusive with each other and the granular permission settings.";
+            }
+            (let unknownBuckets = subtractLists (attrNames org.buckets) auth.readBuckets; in {
+              assertion = unknownBuckets == [];
+              message = "influxdb2: provision.organizations.${orgName}.auths.${authName}: Refers to invalid buckets in readBuckets: ${toString unknownBuckets}";
+            })
+            (let unknownBuckets = subtractLists (attrNames org.buckets) auth.writeBuckets; in {
+              assertion = unknownBuckets == [];
+              message = "influxdb2: provision.organizations.${orgName}.auths.${authName}: Refers to invalid buckets in writeBuckets: ${toString unknownBuckets}";
+            })
+          ]
+        )
+      ));
+
+    services.influxdb2.provision = mkIf cfg.provision.enable {
+      organizations.${cfg.provision.initialSetup.organization} = {
+        buckets.${cfg.provision.initialSetup.bucket} = {
+          inherit (cfg.provision.initialSetup) retention;
+        };
+      };
+      users.${cfg.provision.initialSetup.username} = {
+        inherit (cfg.provision.initialSetup) passwordFile;
+      };
+    };
 
     systemd.services.influxdb2 = {
       description = "InfluxDB is an open-source, distributed, time series database";
@@ -111,58 +451,38 @@ in
           "admin-password:${cfg.provision.initialSetup.passwordFile}"
           "admin-token:${cfg.provision.initialSetup.tokenFile}"
         ];
+
+        ExecStartPost = mkIf cfg.provision.enable (
+          [provisioningScript] ++
+          # Only the restarter runs with elevated privileges
+          optional anyAuthDefined "+${restarterScript}"
+        );
       };
 
-      path = [pkgs.influxdb2-cli];
+      path = [
+        pkgs.influxdb2-cli
+        pkgs.jq
+      ];
 
-      # Mark if this is the first startup so postStart can do the initial setup
-      preStart = mkIf cfg.provision.enable ''
+      # Mark if this is the first startup so the ExecStartPost script can do the initial setup.
+      # Also extract any token secret mappings and apply them if this isn't the first start.
+      preStart = let
+        tokenPaths = listToAttrs (flatten
+          # For all organizations
+          (flip mapAttrsToList cfg.provision.organizations
+            # For each contained token that has a token file
+            (_: org: flip mapAttrsToList (filterAttrs (_: x: x.tokenFile != null) org.auths)
+              # Collect id -> tokenFile for the mapping
+              (_: auth: nameValuePair auth.id auth.tokenFile))));
+        tokenMappings = pkgs.writeText "token_mappings.json" (builtins.toJSON tokenPaths);
+      in mkIf cfg.provision.enable ''
         if ! test -e "$STATE_DIRECTORY/influxd.bolt"; then
           touch "$STATE_DIRECTORY/.first_startup"
+        else
+          # Manipulate provisioned api tokens if necessary
+          ${getExe pkgs.influxdb2-token-manipulator} "$STATE_DIRECTORY/influxd.bolt" ${tokenMappings}
         fi
       '';
-
-      postStart = let
-        initCfg = cfg.provision.initialSetup;
-      in mkIf cfg.provision.enable (
-        ''
-          set -euo pipefail
-          export INFLUX_HOST="http://"${escapeShellArg (cfg.settings.http-bind-address or "localhost:8086")}
-
-          # Wait for the influxdb server to come online
-          count=0
-          while ! influx ping &>/dev/null; do
-            if [ "$count" -eq 300 ]; then
-              echo "Tried for 30 seconds, giving up..."
-              exit 1
-            fi
-
-            if ! kill -0 "$MAINPID"; then
-              echo "Main server died, giving up..."
-              exit 1
-            fi
-
-            sleep 0.1
-            count=$((count++))
-          done
-
-          # Do the initial database setup. Pass /dev/null as configs-path to
-          # avoid saving the token as the active config.
-          if test -e "$STATE_DIRECTORY/.first_startup"; then
-            influx setup \
-              --configs-path /dev/null \
-              --org ${escapeShellArg initCfg.organization} \
-              --bucket ${escapeShellArg initCfg.bucket} \
-              --username ${escapeShellArg initCfg.username} \
-              --password "$(< "$CREDENTIALS_DIRECTORY/admin-password")" \
-              --token "$(< "$CREDENTIALS_DIRECTORY/admin-token")" \
-              --retention ${escapeShellArg initCfg.retention} \
-              --force >/dev/null
-
-            rm -f "$STATE_DIRECTORY/.first_startup"
-          fi
-        ''
-      );
     };
 
     users.extraUsers.influxdb2 = {
diff --git a/nixpkgs/nixos/modules/services/editors/emacs.nix b/nixpkgs/nixos/modules/services/editors/emacs.nix
index fe3a10159794..fad4f39ff210 100644
--- a/nixpkgs/nixos/modules/services/editors/emacs.nix
+++ b/nixpkgs/nixos/modules/services/editors/emacs.nix
@@ -80,6 +80,15 @@ in
         using the EDITOR environment variable.
       '';
     };
+
+    startWithGraphical = mkOption {
+      type = types.bool;
+      default = config.services.xserver.enable;
+      defaultText = literalExpression "config.services.xserver.enable";
+      description = lib.mdDoc ''
+        Start emacs with the graphical session instead of any session. Without this, emacs clients will not be able to create frames in the graphical session.
+      '';
+    };
   };
 
   config = mkIf (cfg.enable || cfg.install) {
@@ -92,7 +101,13 @@ in
         ExecStop = "${cfg.package}/bin/emacsclient --eval (kill-emacs)";
         Restart = "always";
       };
-    } // optionalAttrs cfg.enable { wantedBy = [ "default.target" ]; };
+
+      unitConfig = optionalAttrs cfg.startWithGraphical {
+        After = "graphical-session.target";
+      };
+    } // optionalAttrs cfg.enable {
+      wantedBy = if cfg.startWithGraphical then [ "graphical-session.target" ] else [ "default.target" ];
+    };
 
     environment.systemPackages = [ cfg.package editorScript desktopApplicationFile ];
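A minimal sketch of the new option in use:

    services.emacs = {
      enable = true;
      # Tie the daemon to graphical-session.target so emacsclient can create
      # graphical frames; the default follows config.services.xserver.enable.
      startWithGraphical = true;
    };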
 
diff --git a/nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix b/nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix
new file mode 100644
index 000000000000..f472b5774cbf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.hddfancontrol;
+  types = lib.types;
+in
+
+{
+  options = {
+
+    services.hddfancontrol.enable = lib.mkEnableOption (lib.mdDoc "hddfancontrol daemon");
+
+    services.hddfancontrol.disks = lib.mkOption {
+      type = with types; listOf path;
+      default = [];
+      description = lib.mdDoc ''
+        Drive(s) to get temperature from
+      '';
+      example = ["/dev/sda"];
+    };
+
+    services.hddfancontrol.pwmPaths = lib.mkOption {
+      type = with types; listOf path;
+      default = [];
+      description = lib.mdDoc ''
+        PWM filepath(s) to control fan speed (under /sys)
+      '';
+      example = ["/sys/class/hwmon/hwmon2/pwm1"];
+    };
+
+    services.hddfancontrol.smartctl = lib.mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Probe temperature using smartctl instead of hddtemp or hdparm
+      '';
+    };
+
+    services.hddfancontrol.extraArgs = lib.mkOption {
+      type = with types; listOf str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra commandline arguments for hddfancontrol
+      '';
+      example = ["--pwm-start-value=32"
+                 "--pwm-stop-value=0"
+                 "--spin-down-time=900"];
+    };
+  };
+
+  config = lib.mkIf cfg.enable (
+    let args = lib.concatLists [
+      ["-d"] cfg.disks
+      ["-p"] cfg.pwmPaths
+      (lib.optional cfg.smartctl "--smartctl")
+      cfg.extraArgs
+    ]; in {
+      systemd.packages = [pkgs.hddfancontrol];
+
+      systemd.services.hddfancontrol = {
+        wantedBy = [ "multi-user.target" ];
+        environment.HDDFANCONTROL_ARGS = lib.escapeShellArgs args;
+      };
+    }
+  );
+}
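A usage sketch of the new module, mirroring the example values from the option declarations (the device and hwmon paths are machine-specific):

    services.hddfancontrol = {
      enable = true;
      disks = [ "/dev/sda" ];
      pwmPaths = [ "/sys/class/hwmon/hwmon2/pwm1" ];
      extraArgs = [ "--pwm-start-value=32" "--pwm-stop-value=0" ];
    };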
diff --git a/nixpkgs/nixos/modules/services/networking/tailscale.nix b/nixpkgs/nixos/modules/services/networking/tailscale.nix
index f308b7e33114..8b35cc8d6669 100644
--- a/nixpkgs/nixos/modules/services/networking/tailscale.nix
+++ b/nixpkgs/nixos/modules/services/networking/tailscale.nix
@@ -6,7 +6,7 @@ let
   cfg = config.services.tailscale;
   isNetworkd = config.networking.useNetworkd;
 in {
-  meta.maintainers = with maintainers; [ danderson mbaillie twitchyliquid64 ];
+  meta.maintainers = with maintainers; [ danderson mbaillie twitchyliquid64 mfrw ];
 
   options.services.tailscale = {
     enable = mkEnableOption (lib.mdDoc "Tailscale client daemon");
diff --git a/nixpkgs/nixos/modules/services/networking/twingate.nix b/nixpkgs/nixos/modules/services/networking/twingate.nix
index 170d392bf213..03c68fc874f0 100644
--- a/nixpkgs/nixos/modules/services/networking/twingate.nix
+++ b/nixpkgs/nixos/modules/services/networking/twingate.nix
@@ -17,7 +17,7 @@ in
     };
 
     networking.firewall.checkReversePath = lib.mkDefault "loose";
-    services.resolved.enable = !(config.networking.networkmanager.enable);
+    services.resolved.enable = lib.mkIf (!config.networking.networkmanager.enable) true;
 
     environment.systemPackages = [ cfg.package ]; # For the CLI.
   };
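The change matters for option merging: the old form always defined `services.resolved.enable` (forcing it to `false` whenever NetworkManager was enabled and conflicting with any explicit user definition), while `lib.mkIf` only contributes a definition when its condition holds. Schematically:

    # Old: unconditionally defines the option; with NetworkManager enabled it
    # evaluates to false and clashes with a user's explicit `true`.
    services.resolved.enable = !config.networking.networkmanager.enable;

    # New: defines the option only when NetworkManager is off; otherwise the
    # option keeps its default and remains freely overridable.
    services.resolved.enable = lib.mkIf (!config.networking.networkmanager.enable) true;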
diff --git a/nixpkgs/nixos/modules/services/system/zram-generator.nix b/nixpkgs/nixos/modules/services/system/zram-generator.nix
new file mode 100644
index 000000000000..5902eda55696
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/zram-generator.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.zram-generator;
+  settingsFormat = pkgs.formats.ini { };
+in
+{
+  meta = {
+    maintainers = with lib.maintainers; [ nickcao ];
+  };
+
+  options.services.zram-generator = {
+    enable = lib.mkEnableOption (lib.mdDoc "Systemd unit generator for zram devices");
+
+    package = lib.mkPackageOptionMD pkgs "zram-generator" { };
+
+    settings = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Configuration for zram-generator,
+        see https://github.com/systemd/zram-generator for documentation.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isModule "ZRAM")
+    ];
+
+    systemd.packages = [ cfg.package ];
+    systemd.services."systemd-zram-setup@".path = [ pkgs.util-linux ]; # for mkswap
+
+    environment.etc."systemd/zram-generator.conf".source = settingsFormat.generate "zram-generator.conf" cfg.settings;
+  };
+}
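A standalone usage sketch of the new module (the `zram0` section name and the size expression follow the zram-generator configuration format linked in the option description):

    services.zram-generator = {
      enable = true;
      settings.zram0 = {
        zram-size = "min(ram / 2, 4096)";
        compression-algorithm = "zstd";
      };
    };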
diff --git a/nixpkgs/nixos/modules/services/web-apps/netbox.nix b/nixpkgs/nixos/modules/services/web-apps/netbox.nix
index e2ef350ba4e5..5f42f42a9af9 100644
--- a/nixpkgs/nixos/modules/services/web-apps/netbox.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/netbox.nix
@@ -1,7 +1,5 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.services.netbox;
   pythonFmt = pkgs.formats.pythonVars {};
@@ -17,7 +15,7 @@ let
   pkg = (cfg.package.overrideAttrs (old: {
     installPhase = old.installPhase + ''
       ln -s ${configFile} $out/opt/netbox/netbox/netbox/configuration.py
-    '' + optionalString cfg.enableLdap ''
+    '' + lib.optionalString cfg.enableLdap ''
       ln -s ${cfg.ldapConfigPath} $out/opt/netbox/netbox/netbox/ldap_config.py
     '';
   })).override {
@@ -31,7 +29,7 @@ let
 
 in {
   options.services.netbox = {
-    enable = mkOption {
+    enable = lib.mkOption {
       type = lib.types.bool;
       default = false;
       description = lib.mdDoc ''
@@ -66,18 +64,18 @@ in {
       };
     };
 
-    listenAddress = mkOption {
-      type = types.str;
+    listenAddress = lib.mkOption {
+      type = lib.types.str;
       default = "[::1]";
       description = lib.mdDoc ''
         Address the server will listen on.
       '';
     };
 
-    package = mkOption {
-      type = types.package;
-      default = if versionAtLeast config.system.stateVersion "23.05" then pkgs.netbox else pkgs.netbox_3_3;
-      defaultText = literalExpression ''
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = if lib.versionAtLeast config.system.stateVersion "23.05" then pkgs.netbox else pkgs.netbox_3_3;
+      defaultText = lib.literalExpression ''
         if versionAtLeast config.system.stateVersion "23.05" then pkgs.netbox else pkgs.netbox_3_3;
       '';
       description = lib.mdDoc ''
@@ -85,18 +83,18 @@ in {
       '';
     };
 
-    port = mkOption {
-      type = types.port;
+    port = lib.mkOption {
+      type = lib.types.port;
       default = 8001;
       description = lib.mdDoc ''
         Port the server will listen on.
       '';
     };
 
-    plugins = mkOption {
-      type = types.functionTo (types.listOf types.package);
+    plugins = lib.mkOption {
+      type = with lib.types; functionTo (listOf package);
       default = _: [];
-      defaultText = literalExpression ''
+      defaultText = lib.literalExpression ''
         python3Packages: with python3Packages; [];
       '';
       description = lib.mdDoc ''
@@ -104,23 +102,23 @@ in {
       '';
     };
 
-    dataDir = mkOption {
-      type = types.str;
+    dataDir = lib.mkOption {
+      type = lib.types.str;
       default = "/var/lib/netbox";
       description = lib.mdDoc ''
         Storage path of netbox.
       '';
     };
 
-    secretKeyFile = mkOption {
-      type = types.path;
+    secretKeyFile = lib.mkOption {
+      type = lib.types.path;
       description = lib.mdDoc ''
         Path to a file containing the secret key.
       '';
     };
 
-    extraConfig = mkOption {
-      type = types.lines;
+    extraConfig = lib.mkOption {
+      type = lib.types.lines;
       default = "";
       description = lib.mdDoc ''
         Additional lines of configuration appended to the `configuration.py`.
@@ -128,8 +126,8 @@ in {
       '';
     };
 
-    enableLdap = mkOption {
-      type = types.bool;
+    enableLdap = lib.mkOption {
+      type = lib.types.bool;
       default = false;
       description = lib.mdDoc ''
         Enable LDAP-Authentication for Netbox.
@@ -138,8 +136,8 @@ in {
       '';
     };
 
-    ldapConfigPath = mkOption {
-      type = types.path;
+    ldapConfigPath = lib.mkOption {
+      type = lib.types.path;
       default = "";
       description = lib.mdDoc ''
         Path to the Configuration-File for LDAP-Authentication, will be loaded as `ldap_config.py`.
@@ -173,15 +171,17 @@ in {
     };
   };
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     services.netbox = {
-      plugins = mkIf cfg.enableLdap (ps: [ ps.django-auth-ldap ]);
+      plugins = lib.mkIf cfg.enableLdap (ps: [ ps.django-auth-ldap ]);
       settings = {
         STATIC_ROOT = staticDir;
         MEDIA_ROOT = "${cfg.dataDir}/media";
         REPORTS_ROOT = "${cfg.dataDir}/reports";
         SCRIPTS_ROOT = "${cfg.dataDir}/scripts";
 
+        GIT_PATH = "${pkgs.gitMinimal}/bin/git";
+
         DATABASE = {
           NAME = "netbox";
           USER = "netbox";
@@ -264,40 +264,40 @@ in {
         RestartSec = 30;
       };
     in {
-      netbox-migration = {
-        description = "NetBox migrations";
-        wantedBy = [ "netbox.target" ];
-
-        environment = {
-          PYTHONPATH = pkg.pythonPath;
-        };
-
-        serviceConfig = defaultServiceConfig // {
-          Type = "oneshot";
-          ExecStart = ''
-            ${pkg}/bin/netbox migrate
-          '';
-          PrivateTmp = true;
-        };
-      };
-
       netbox = {
         description = "NetBox WSGI Service";
         documentation = [ "https://docs.netbox.dev/" ];
 
         wantedBy = [ "netbox.target" ];
 
-        after = [ "network-online.target" "netbox-migration.service" ];
+        after = [ "network-online.target" ];
         wants = [ "network-online.target" ];
 
+        environment.PYTHONPATH = pkg.pythonPath;
+
         preStart = ''
+          # On the first run, or on upgrade / downgrade, run migrations and related tasks.
+          # This mostly corresponds to upstream NetBox's 'upgrade.sh' script.
+          versionFile="${cfg.dataDir}/version"
+
+          if [[ -e "$versionFile" && "$(cat "$versionFile")" == "${cfg.package.version}" ]]; then
+            exit 0
+          fi
+
+          ${pkg}/bin/netbox migrate
           ${pkg}/bin/netbox trace_paths --no-input
           ${pkg}/bin/netbox collectstatic --no-input
           ${pkg}/bin/netbox remove_stale_contenttypes --no-input
+          # TODO: remove the condition when we remove netbox_3_3
+          ${lib.optionalString
+            (lib.versionAtLeast cfg.package.version "3.5.0")
+            "${pkg}/bin/netbox reindex --lazy"}
+          ${pkg}/bin/netbox clearsessions
+          ${pkg}/bin/netbox clearcache
+
+          echo "${cfg.package.version}" > "$versionFile"
         '';
 
-        environment.PYTHONPATH = pkg.pythonPath;
-
         serviceConfig = defaultServiceConfig // {
           ExecStart = ''
             ${pkgs.python3Packages.gunicorn}/bin/gunicorn netbox.wsgi \
@@ -331,7 +331,7 @@ in {
 
         wantedBy = [ "multi-user.target" ];
 
-        after = [ "network-online.target" ];
+        after = [ "network-online.target" "netbox.service" ];
         wants = [ "network-online.target" ];
 
         environment.PYTHONPATH = pkg.pythonPath;
@@ -351,7 +351,7 @@ in {
 
       wantedBy = [ "multi-user.target" ];
 
-      after = [ "network-online.target" ];
+      after = [ "network-online.target" "netbox.service" ];
       wants = [ "network-online.target" ];
 
       timerConfig = {
diff --git a/nixpkgs/nixos/modules/services/web-servers/caddy/default.nix b/nixpkgs/nixos/modules/services/web-servers/caddy/default.nix
index 5cc9ef6dd6d9..cec0b379f67a 100644
--- a/nixpkgs/nixos/modules/services/web-servers/caddy/default.nix
+++ b/nixpkgs/nixos/modules/services/web-servers/caddy/default.nix
@@ -24,21 +24,26 @@ let
         }
       '';
 
-  configFile =
-    let
-      Caddyfile = pkgs.writeTextDir "Caddyfile" ''
-        {
-          ${cfg.globalConfig}
-        }
-        ${cfg.extraConfig}
-      '';
+  settingsFormat = pkgs.formats.json { };
 
-      Caddyfile-formatted = pkgs.runCommand "Caddyfile-formatted" { nativeBuildInputs = [ cfg.package ]; } ''
-        mkdir -p $out
-        cp --no-preserve=mode ${Caddyfile}/Caddyfile $out/Caddyfile
-        caddy fmt --overwrite $out/Caddyfile
-      '';
-    in
+  configFile =
+    if cfg.settings != { } then
+      settingsFormat.generate "caddy.json" cfg.settings
+    else
+      let
+        Caddyfile = pkgs.writeTextDir "Caddyfile" ''
+          {
+            ${cfg.globalConfig}
+          }
+          ${cfg.extraConfig}
+        '';
+
+        Caddyfile-formatted = pkgs.runCommand "Caddyfile-formatted" { nativeBuildInputs = [ cfg.package ]; } ''
+          mkdir -p $out
+          cp --no-preserve=mode ${Caddyfile}/Caddyfile $out/Caddyfile
+          caddy fmt --overwrite $out/Caddyfile
+        '';
+      in
       "${if pkgs.stdenv.buildPlatform == pkgs.stdenv.hostPlatform then Caddyfile-formatted else Caddyfile}/Caddyfile";
 
   etcConfigFile = "caddy/caddy_config";
@@ -299,6 +304,27 @@ in
         which could delay the reload essentially indefinitely.
       '';
     };
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      default = {};
+      description = lib.mdDoc ''
+        Structured configuration for Caddy to generate a Caddy JSON configuration file.
+        See <https://caddyserver.com/docs/json/> for available options.
+
+        ::: {.warning}
+        Using a [Caddyfile](https://caddyserver.com/docs/caddyfile) instead of a JSON config is highly recommended by upstream.
+        There are only very few exceptions to this.
+
+        Please use a Caddyfile via {option}`services.caddy.configFile`, {option}`services.caddy.virtualHosts` or
+        {option}`services.caddy.extraConfig` with {option}`services.caddy.globalConfig` instead.
+        :::
+
+        ::: {.note}
+        Takes precedence over most `services.caddy.*` options, such as {option}`services.caddy.configFile` and {option}`services.caddy.virtualHosts`, if specified.
+        :::
+      '';
+    };
   };
 
   # implementation
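A sketch of the new `settings` option, matching the structure exercised by the rfc42 test later in this change (Caddy's JSON schema is documented at the URL in the option description):

    services.caddy = {
      enable = true;
      settings = {
        apps.http.servers.default = {
          listen = [ ":80" ];
          routes = [{
            handle = [{
              handler = "static_response";
              body = "hello world";
              status_code = 200;
            }];
          }];
        };
      };
    };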
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/dwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/dwm.nix
index e114f2e26b17..82900fd30540 100644
--- a/nixpkgs/nixos/modules/services/x11/window-managers/dwm.nix
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/dwm.nix
@@ -45,6 +45,7 @@ in
       { name = "dwm";
         start =
           ''
+            export _JAVA_AWT_WM_NONREPARENTING=1
             dwm &
             waitPID=$!
           '';
diff --git a/nixpkgs/nixos/modules/system/boot/binfmt.nix b/nixpkgs/nixos/modules/system/boot/binfmt.nix
index bf1688feb19e..5172371d0afb 100644
--- a/nixpkgs/nixos/modules/system/boot/binfmt.nix
+++ b/nixpkgs/nixos/modules/system/boot/binfmt.nix
@@ -137,14 +137,8 @@ let
       magicOrExtension = ''\x00asm'';
       mask = ''\xff\xff\xff\xff'';
     };
-    x86_64-windows = {
-      magicOrExtension = "exe";
-      recognitionType = "extension";
-    };
-    i686-windows = {
-      magicOrExtension = "exe";
-      recognitionType = "extension";
-    };
+    x86_64-windows.magicOrExtension = "MZ";
+    i686-windows.magicOrExtension = "MZ";
   };
 
 in {
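With the change above, Windows binaries are recognized by their `MZ` header rather than by the `.exe` filename extension. A sketch of enabling this through the module's existing interface (assuming `boot.binfmt.emulatedSystems` accepts the windows targets, as the magics attrset suggests):

    boot.binfmt.emulatedSystems = [ "x86_64-windows" ];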
diff --git a/nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl b/nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl
index a84e374624d1..d1e7a0cb8178 100644
--- a/nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl
+++ b/nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl
@@ -516,38 +516,53 @@ sub addEntry {
     $conf .= "}\n\n";
 }
 
+sub addGeneration {
+    my ($name, $nameSuffix, $path, $options, $current) = @_;
 
-# Add default entries.
-$conf .= "$extraEntries\n" if $extraEntriesBeforeNixOS;
+    # Do not search for grand children
+    my @links = sort (glob "$path/specialisation/*");
 
-addEntry("@distroName@ - Default", $defaultConfig, $entryOptions, 1);
+    if ($current != 1 && scalar(@links) != 0) {
+        $conf .= "submenu \"> $name$nameSuffix\" --class submenu {\n";
+    }
 
-$conf .= "$extraEntries\n" unless $extraEntriesBeforeNixOS;
+    addEntry("$name" . (scalar(@links) == 0 ? "" : " - Default") . $nameSuffix, $path, $options, $current);
 
-# Find all the children of the current default configuration
-# Do not search for grand children
-my @links = sort (glob "$defaultConfig/specialisation/*");
-foreach my $link (@links) {
+    # Find all the children of the current default configuration
+    # Do not search for grand children
+    foreach my $link (@links) {
 
-    my $entryName = "";
+        my $entryName = "";
 
-    my $cfgName = readFile("$link/configuration-name");
+        my $cfgName = readFile("$link/configuration-name");
 
-    my $date = strftime("%F", localtime(lstat($link)->mtime));
-    my $version =
-        -e "$link/nixos-version"
-        ? readFile("$link/nixos-version")
-        : basename((glob(dirname(Cwd::abs_path("$link/kernel")) . "/lib/modules/*"))[0]);
+        my $date = strftime("%F", localtime(lstat($link)->mtime));
+        my $version =
+            -e "$link/nixos-version"
+            ? readFile("$link/nixos-version")
+            : basename((glob(dirname(Cwd::abs_path("$link/kernel")) . "/lib/modules/*"))[0]);
 
-    if ($cfgName) {
-        $entryName = $cfgName;
-    } else {
-        my $linkname = basename($link);
-        $entryName = "($linkname - $date - $version)";
+        if ($cfgName) {
+            $entryName = $cfgName;
+        } else {
+            my $linkname = basename($link);
+            $entryName = "($linkname - $date - $version)";
+        }
+        addEntry("$name - $entryName", $link, "", 1);
+    }
+
+    if ($current != 1 && scalar(@links) != 0) {
+        $conf .= "}\n";
     }
-    addEntry("@distroName@ - $entryName", $link, "", 1);
 }
 
+# Add default entries.
+$conf .= "$extraEntries\n" if $extraEntriesBeforeNixOS;
+
+addGeneration("@distroName@", "", $defaultConfig, $entryOptions, 1);
+
+$conf .= "$extraEntries\n" unless $extraEntriesBeforeNixOS;
+
 my $grubBootPath = $grubBoot->path;
 # extraEntries could refer to @bootRoot@, which we have to substitute
 $conf =~ s/\@bootRoot\@/$grubBootPath/g;
@@ -577,7 +592,7 @@ sub addProfile {
             -e "$link/nixos-version"
             ? readFile("$link/nixos-version")
             : basename((glob(dirname(Cwd::abs_path("$link/kernel")) . "/lib/modules/*"))[0]);
-        addEntry("@distroName@ - Configuration " . nrFromGen($link) . " ($date - $version)", $link, $subEntryOptions, 0);
+        addGeneration("@distroName@ - Configuration " . nrFromGen($link), " ($date - $version)", $link, $subEntryOptions, 0);
     }
 
     $conf .= "}\n";
diff --git a/nixpkgs/nixos/tests/all-tests.nix b/nixpkgs/nixos/tests/all-tests.nix
index 6f17bd2cdd3b..19aaac694594 100644
--- a/nixpkgs/nixos/tests/all-tests.nix
+++ b/nixpkgs/nixos/tests/all-tests.nix
@@ -341,6 +341,7 @@ in {
   hbase2 = handleTest ./hbase.nix { package=pkgs.hbase2; };
   hbase_2_4 = handleTest ./hbase.nix { package=pkgs.hbase_2_4; };
   hbase3 = handleTest ./hbase.nix { package=pkgs.hbase3; };
+  hddfancontrol = handleTest ./hddfancontrol.nix {};
   hedgedoc = handleTest ./hedgedoc.nix {};
   herbstluftwm = handleTest ./herbstluftwm.nix {};
   homepage-dashboard = handleTest ./homepage-dashboard.nix {};
@@ -433,7 +434,7 @@ in {
   lightdm = handleTest ./lightdm.nix {};
   lighttpd = handleTest ./lighttpd.nix {};
   limesurvey = handleTest ./limesurvey.nix {};
-  listmonk = handleTest ./listmonk.nix {};
+  listmonk = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./listmonk.nix {};
   litestream = handleTest ./litestream.nix {};
   lldap = handleTest ./lldap.nix {};
   locate = handleTest ./locate.nix {};
@@ -442,10 +443,8 @@ in {
   loki = handleTest ./loki.nix {};
   luks = handleTest ./luks.nix {};
   lvm2 = handleTest ./lvm2 {};
-  lxd = handleTest ./lxd.nix {};
-  lxd-nftables = handleTest ./lxd-nftables.nix {};
+  lxd = handleTest ./lxd {};
   lxd-image-server = handleTest ./lxd-image-server.nix {};
-  lxd-ui = handleTest ./lxd-ui.nix {};
   #logstash = handleTest ./logstash.nix {};
   lorri = handleTest ./lorri/default.nix {};
   maddy = discoverTests (import ./maddy { inherit handleTest; });
@@ -525,6 +524,7 @@ in {
   networking.scripted = handleTest ./networking.nix { networkd = false; };
   netbox = handleTest ./web-apps/netbox.nix { inherit (pkgs) netbox; };
   netbox_3_3 = handleTest ./web-apps/netbox.nix { netbox = pkgs.netbox_3_3; };
+  netbox-upgrade = handleTest ./web-apps/netbox-upgrade.nix {};
   # TODO: put in networking.nix after the test becomes more complete
   networkingProxy = handleTest ./networking-proxy.nix {};
   nextcloud = handleTest ./nextcloud {};
diff --git a/nixpkgs/nixos/tests/caddy.nix b/nixpkgs/nixos/tests/caddy.nix
index 238091ec606f..5a0d3539394b 100644
--- a/nixpkgs/nixos/tests/caddy.nix
+++ b/nixpkgs/nixos/tests/caddy.nix
@@ -34,6 +34,20 @@ import ./make-test-python.nix ({ pkgs, ... }: {
           "http://localhost:8081" = { };
         };
       };
+      specialisation.rfc42.configuration = {
+        services.caddy.settings = {
+          apps.http.servers.default = {
+            listen = [ ":80" ];
+            routes = [{
+              handle = [{
+                body = "hello world";
+                handler = "static_response";
+                status_code = 200;
+              }];
+            }];
+          };
+        };
+      };
     };
   };
 
@@ -41,6 +55,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     let
       justReloadSystem = "${nodes.webserver.system.build.toplevel}/specialisation/config-reload";
       multipleConfigs = "${nodes.webserver.system.build.toplevel}/specialisation/multiple-configs";
+      rfc42Config = "${nodes.webserver.system.build.toplevel}/specialisation/rfc42";
     in
     ''
       url = "http://localhost/example.html"
@@ -62,5 +77,12 @@ import ./make-test-python.nix ({ pkgs, ... }: {
           )
           webserver.wait_for_open_port(8080)
           webserver.wait_for_open_port(8081)
+
+      with subtest("rfc42 settings config"):
+          webserver.succeed(
+              "${rfc42Config}/bin/switch-to-configuration test >&2"
+          )
+          webserver.wait_for_open_port(80)
+          webserver.succeed("curl http://localhost | grep hello")
     '';
 })
diff --git a/nixpkgs/nixos/tests/common/lxd/config.yaml b/nixpkgs/nixos/tests/common/lxd/config.yaml
deleted file mode 100644
index 3bb667ed43f7..000000000000
--- a/nixpkgs/nixos/tests/common/lxd/config.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-storage_pools:
-  - name: default
-    driver: dir
-    config:
-      source: /var/lxd-pool
-
-networks:
-  - name: lxdbr0
-    type: bridge
-    config:
-      ipv4.address: auto
-      ipv6.address: none
-
-profiles:
-  - name: default
-    devices:
-      eth0:
-        name: eth0
-        network: lxdbr0
-        type: nic
-      root:
-        path: /
-        pool: default
-        type: disk
diff --git a/nixpkgs/nixos/tests/hddfancontrol.nix b/nixpkgs/nixos/tests/hddfancontrol.nix
new file mode 100644
index 000000000000..b5fa7ccb2c19
--- /dev/null
+++ b/nixpkgs/nixos/tests/hddfancontrol.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "hddfancontrol";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ benley ];
+  };
+
+  nodes.machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    services.hddfancontrol.enable = true;
+    services.hddfancontrol.disks = ["/dev/vda"];
+    services.hddfancontrol.pwmPaths = ["/test/hwmon1/pwm1"];
+    services.hddfancontrol.extraArgs = ["--pwm-start-value=32"
+                                        "--pwm-stop-value=0"];
+
+    systemd.services.hddfancontrol_fixtures = {
+      description = "Install test fixtures for hddfancontrol";
+      serviceConfig = {
+        Type = "oneshot";
+      };
+      script = ''
+        mkdir -p /test/hwmon1
+        echo 255 > /test/hwmon1/pwm1
+        echo 2 > /test/hwmon1/pwm1_enable
+      '';
+      wantedBy = ["hddfancontrol.service"];
+      before = ["hddfancontrol.service"];
+    };
+
+    systemd.services.hddfancontrol.serviceConfig.ReadWritePaths = "/test";
+  };
+
+  # hddfancontrol.service will fail to start because qemu /dev/vda doesn't have
+  # any thermal interfaces, but it should ensure that fans appear to be running
+  # before it aborts.
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("journalctl -eu hddfancontrol.service|grep 'Setting fan speed'")
+    machine.shutdown()
+
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/influxdb2.nix b/nixpkgs/nixos/tests/influxdb2.nix
index c9c54b788cc0..1631ac1d9408 100644
--- a/nixpkgs/nixos/tests/influxdb2.nix
+++ b/nixpkgs/nixos/tests/influxdb2.nix
@@ -6,6 +6,9 @@ import ./make-test-python.nix ({ pkgs, ...} : {
 
   nodes.machine = { lib, ... }: {
     environment.systemPackages = [ pkgs.influxdb2-cli ];
+    # Make sure that the service is restarted immediately if tokens need to be rewritten
+    # without relying on any Restart=on-failure behavior
+    systemd.services.influxdb2.serviceConfig.RestartSec = 6000;
     services.influxdb2.enable = true;
     services.influxdb2.provision = {
       enable = true;
@@ -15,22 +18,208 @@ import ./make-test-python.nix ({ pkgs, ...} : {
         passwordFile = pkgs.writeText "admin-pw" "ExAmPl3PA55W0rD";
         tokenFile = pkgs.writeText "admin-token" "verysecureadmintoken";
       };
+      organizations.someorg = {
+        buckets.somebucket = {};
+        auths.sometoken = {
+          description = "some auth token";
+          readBuckets = ["somebucket"];
+          writeBuckets = ["somebucket"];
+        };
+      };
+      users.someuser.passwordFile = pkgs.writeText "tmp-pw" "abcgoiuhaoga";
+    };
+
+    specialisation.withModifications.configuration = { ... }: {
+      services.influxdb2.provision = {
+        organizations.someorg.buckets.somebucket.present = false;
+        organizations.someorg.auths.sometoken.present = false;
+        users.someuser.present = false;
+
+        organizations.myorg = {
+          description = "Myorg description";
+          buckets.mybucket = {
+            description = "Mybucket description";
+          };
+          auths.mytoken = {
+            operator = true;
+            description = "operator token";
+            tokenFile = pkgs.writeText "tmp-tok" "someusertoken";
+          };
+        };
+        users.myuser.passwordFile = pkgs.writeText "tmp-pw" "abcgoiuhaoga";
+      };
+    };
+
+    specialisation.withParentDelete.configuration = { ... }: {
+      services.influxdb2.provision = {
+        organizations.someorg.present = false;
+        # Deleting the parent implies:
+        #organizations.someorg.buckets.somebucket.present = false;
+        #organizations.someorg.auths.sometoken.present = false;
+      };
+    };
+
+    specialisation.withNewTokens.configuration = { ... }: {
+      services.influxdb2.provision = {
+        organizations.default = {
+          auths.operator = {
+            operator = true;
+            description = "new optoken";
+            tokenFile = pkgs.writeText "tmp-tok" "newoptoken";
+          };
+          auths.allaccess = {
+            operator = true;
+            description = "new allaccess";
+            tokenFile = pkgs.writeText "tmp-tok" "newallaccess";
+          };
+          auths.specifics = {
+            description = "new specifics";
+            readPermissions = ["users" "tasks"];
+            writePermissions = ["tasks"];
+            tokenFile = pkgs.writeText "tmp-tok" "newspecificstoken";
+          };
+        };
+      };
     };
   };
 
   testScript = { nodes, ... }:
     let
+      specialisations = "${nodes.machine.system.build.toplevel}/specialisation";
       tokenArg = "--token verysecureadmintoken";
     in ''
+      def assert_contains(haystack, needle):
+          if needle not in haystack:
+              print("The haystack that will cause the following exception is:")
+              print("---")
+              print(haystack)
+              print("---")
+              raise Exception(f"Expected string '{needle}' was not found")
+
+      def assert_lacks(haystack, needle):
+          if needle in haystack:
+              print("The haystack that will cause the following exception is:")
+              print("---")
+              print(haystack, end="")
+              print("---")
+              raise Exception(f"Unexpected string '{needle}' was found")
+
       machine.wait_for_unit("influxdb2.service")
 
       machine.fail("curl --fail -X POST 'http://localhost:8086/api/v2/signin' -u admin:wrongpassword")
       machine.succeed("curl --fail -X POST 'http://localhost:8086/api/v2/signin' -u admin:ExAmPl3PA55W0rD")
 
       out = machine.succeed("influx org list ${tokenArg}")
-      assert "default" in out
+      assert_contains(out, "default")
+      assert_lacks(out, "myorg")
+      assert_contains(out, "someorg")
 
       out = machine.succeed("influx bucket list ${tokenArg} --org default")
-      assert "default" in out
+      assert_contains(out, "default")
+
+      machine.fail("influx bucket list ${tokenArg} --org myorg")
+
+      out = machine.succeed("influx bucket list ${tokenArg} --org someorg")
+      assert_contains(out, "somebucket")
+
+      out = machine.succeed("influx user list ${tokenArg}")
+      assert_contains(out, "admin")
+      assert_lacks(out, "myuser")
+      assert_contains(out, "someuser")
+
+      out = machine.succeed("influx auth list ${tokenArg}")
+      assert_lacks(out, "operator token")
+      assert_contains(out, "some auth token")
+
+      with subtest("withModifications"):
+        machine.succeed('${specialisations}/withModifications/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx org list ${tokenArg}")
+        assert_contains(out, "default")
+        assert_contains(out, "myorg")
+        assert_contains(out, "someorg")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org myorg")
+        assert_contains(out, "mybucket")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org someorg")
+        assert_lacks(out, "somebucket")
+
+        out = machine.succeed("influx user list ${tokenArg}")
+        assert_contains(out, "admin")
+        assert_contains(out, "myuser")
+        assert_lacks(out, "someuser")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_lacks(out, "some auth token")
+
+        # Make sure the user token is also usable
+        machine.succeed("influx auth list --token someusertoken")
+
+      with subtest("keepsUnrelated"):
+        machine.succeed('${nodes.machine.system.build.toplevel}/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx org list ${tokenArg}")
+        assert_contains(out, "default")
+        assert_contains(out, "myorg")
+        assert_contains(out, "someorg")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org default")
+        assert_contains(out, "default")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org myorg")
+        assert_contains(out, "mybucket")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org someorg")
+        assert_contains(out, "somebucket")
+
+        out = machine.succeed("influx user list ${tokenArg}")
+        assert_contains(out, "admin")
+        assert_contains(out, "myuser")
+        assert_contains(out, "someuser")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_contains(out, "some auth token")
+
+      with subtest("withParentDelete"):
+        machine.succeed('${specialisations}/withParentDelete/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx org list ${tokenArg}")
+        assert_contains(out, "default")
+        assert_contains(out, "myorg")
+        assert_lacks(out, "someorg")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org default")
+        assert_contains(out, "default")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org myorg")
+        assert_contains(out, "mybucket")
+
+        machine.fail("influx bucket list ${tokenArg} --org someorg")
+
+        out = machine.succeed("influx user list ${tokenArg}")
+        assert_contains(out, "admin")
+        assert_contains(out, "myuser")
+        assert_contains(out, "someuser")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_lacks(out, "some auth token")
+
+      with subtest("withNewTokens"):
+        machine.succeed('${specialisations}/withNewTokens/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_contains(out, "some auth token")
+        assert_contains(out, "new optoken")
+        assert_contains(out, "new allaccess")
+        assert_contains(out, "new specifics")
     '';
 })
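
The provision options exercised above map directly onto a declarative setup; a minimal sketch outside the test harness, assuming the secret files are managed out of band (pkgs.writeText as used in the test puts secrets world-readable into the Nix store, which is only acceptable inside tests):

    services.influxdb2 = {
      enable = true;
      provision = {
        enable = true;
        initialSetup = {
          organization = "default";
          bucket = "default";
          passwordFile = "/run/secrets/influxdb-admin-pw";  # placeholder path
          tokenFile = "/run/secrets/influxdb-admin-token";  # placeholder path
        };
        organizations.someorg = {
          buckets.somebucket = {};
          auths.sometoken = {
            description = "some auth token";
            readBuckets = [ "somebucket" ];
            writeBuckets = [ "somebucket" ];
          };
        };
        # Setting `present = false` on an entity (or on its parent organization)
        # deprovisions it again, as the specialisations above demonstrate.
      };
    };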
diff --git a/nixpkgs/nixos/tests/listmonk.nix b/nixpkgs/nixos/tests/listmonk.nix
index 91003653c09e..938c36026a7f 100644
--- a/nixpkgs/nixos/tests/listmonk.nix
+++ b/nixpkgs/nixos/tests/listmonk.nix
@@ -42,20 +42,27 @@ import ./make-test-python.nix ({ lib, ... }: {
     machine.wait_for_open_port(9000)
     machine.succeed("[[ -f /var/lib/listmonk/.db_settings_initialized ]]")
 
+    assert json.loads(machine.succeed(generate_listmonk_request("GET", 'health')))['data'], 'Health endpoint returned unexpected value'
+
+    # A sample subscriber is guaranteed to exist at install time,
+    # as is a sample transactional template
+    subscribers = json.loads(machine.succeed(generate_listmonk_request('GET', "subscribers")))['data']['results']
+    templates = json.loads(machine.succeed(generate_listmonk_request('GET', "templates")))['data']
+    tx_template = templates[2]  # the sample transactional template
+
     # Test transactional endpoint
-    # subscriber_id=1 is guaranteed to exist at install-time
-    # template_id=2 is guaranteed to exist at install-time and is a transactional template (1 is a campaign template).
-    machine.succeed(
-      generate_listmonk_request('POST', 'tx', data={'subscriber_id': 1, 'template_id': 2})
-    )
-    assert 'Welcome John Doe' in machine.succeed(
+    print(machine.succeed(
+      generate_listmonk_request('POST', 'tx', data={'subscriber_id': subscribers[0]['id'], 'template_id': tx_template['id']})
+    ))
+
+    assert 'Welcome Anon Doe' in machine.succeed(
         "curl --fail http://localhost:8025/api/v2/messages"
-    )
+    ), "Failed to find Welcome John Doe inside the messages API endpoint"
 
     # Test campaign endpoint
     # Based on https://github.com/knadh/listmonk/blob/174a48f252a146d7e69dab42724e3329dbe25ebe/cmd/campaigns.go#L549 as docs do not exist.
     campaign_data = json.loads(machine.succeed(
-      generate_listmonk_request('POST', 'campaigns/1/test', data={'template_id': 1, 'subscribers': ['john@example.com'], 'name': 'Test', 'subject': 'NixOS is great', 'lists': [1], 'messenger': 'email'})
+      generate_listmonk_request('POST', 'campaigns/1/test', data={'template_id': templates[0]['id'], 'subscribers': ['john@example.com'], 'name': 'Test', 'subject': 'NixOS is great', 'lists': [1], 'messenger': 'email'})
     ))
 
     assert campaign_data['data']  # This is a boolean asserting if the test was successful or not: https://github.com/knadh/listmonk/blob/174a48f252a146d7e69dab42724e3329dbe25ebe/cmd/campaigns.go#L626
diff --git a/nixpkgs/nixos/tests/lxd-image-server.nix b/nixpkgs/nixos/tests/lxd-image-server.nix
index e5a292b61bd9..d0afa495a5b1 100644
--- a/nixpkgs/nixos/tests/lxd-image-server.nix
+++ b/nixpkgs/nixos/tests/lxd-image-server.nix
@@ -61,14 +61,14 @@ in {
     machine.wait_for_unit("lxd.service")
     machine.wait_for_file("/var/lib/lxd/unix.socket")
 
-    # It takes additional second for lxd to settle
-    machine.sleep(1)
+    # Wait for lxd to settle
+    machine.succeed("lxd waitready")
 
     # lxd expects the pool's directory to already exist
     machine.succeed("mkdir /var/lxd-pool")
 
     machine.succeed(
-        "cat ${./common/lxd/config.yaml} | lxd init --preseed"
+        "lxd init --minimal"
     )
 
     machine.succeed(
diff --git a/nixpkgs/nixos/tests/lxd.nix b/nixpkgs/nixos/tests/lxd/container.nix
index 2c2c19e0eecf..9e56f6e41e05 100644
--- a/nixpkgs/nixos/tests/lxd.nix
+++ b/nixpkgs/nixos/tests/lxd/container.nix
@@ -1,7 +1,7 @@
-import ./make-test-python.nix ({ pkgs, lib, ... } :
+import ../make-test-python.nix ({ pkgs, lib, ... } :
 
 let
-  lxd-image = import ../release.nix {
+  lxd-image = import ../../release.nix {
     configuration = {
       # Building documentation makes the test unnecessarily take a longer time:
       documentation.enable = lib.mkForce false;
@@ -38,19 +38,18 @@ in {
   };
 
   testScript = ''
+    def instance_is_up(_) -> bool:
+        status, _ = machine.execute("lxc exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+        return status == 0
+
     machine.wait_for_unit("sockets.target")
     machine.wait_for_unit("lxd.service")
     machine.wait_for_file("/var/lib/lxd/unix.socket")
 
-    # It takes additional second for lxd to settle
-    machine.sleep(1)
-
-    # lxd expects the pool's directory to already exist
-    machine.succeed("mkdir /var/lxd-pool")
+    # Wait for lxd to settle
+    machine.succeed("lxd waitready")
 
-    machine.succeed(
-        "cat ${./common/lxd/config.yaml} | lxd init --preseed"
-    )
+    machine.succeed("lxd init --minimal")
 
     machine.succeed(
         "lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos"
@@ -58,21 +57,23 @@ in {
 
     with subtest("Container can be managed"):
         machine.succeed("lxc launch nixos container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
         machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
-        machine.succeed("lxc exec container true")
         machine.succeed("lxc delete -f container")
 
     with subtest("Container is mounted with lxcfs inside"):
         machine.succeed("lxc launch nixos container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
 
         ## ---------- ##
         ## limits.cpu ##
 
         machine.succeed("lxc config set container limits.cpu 1")
         machine.succeed("lxc restart container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
 
         assert (
             "1"
@@ -81,7 +82,8 @@ in {
 
         machine.succeed("lxc config set container limits.cpu 2")
         machine.succeed("lxc restart container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
 
         assert (
             "2"
@@ -93,7 +95,8 @@ in {
 
         machine.succeed("lxc config set container limits.memory 64MB")
         machine.succeed("lxc restart container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
 
         assert (
             "MemTotal:          62500 kB"
@@ -102,7 +105,8 @@ in {
 
         machine.succeed("lxc config set container limits.memory 128MB")
         machine.succeed("lxc restart container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
 
         assert (
             "MemTotal:         125000 kB"
diff --git a/nixpkgs/nixos/tests/lxd/default.nix b/nixpkgs/nixos/tests/lxd/default.nix
new file mode 100644
index 000000000000..2e34907d7936
--- /dev/null
+++ b/nixpkgs/nixos/tests/lxd/default.nix
@@ -0,0 +1,9 @@
+{
+  system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. {inherit system config;},
+}: {
+  container = import ./container.nix {inherit system pkgs;};
+  nftables = import ./nftables.nix {inherit system pkgs;};
+  ui = import ./ui.nix {inherit system pkgs;};
+}
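
Grouping the tests behind one default.nix also means a new scenario only needs one more import; a sketch, where storage.nix is a hypothetical additional test file:

    {
      container = import ./container.nix {inherit system pkgs;};
      nftables = import ./nftables.nix {inherit system pkgs;};
      storage = import ./storage.nix {inherit system pkgs;};  # hypothetical addition
      ui = import ./ui.nix {inherit system pkgs;};
    }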
diff --git a/nixpkgs/nixos/tests/lxd-nftables.nix b/nixpkgs/nixos/tests/lxd/nftables.nix
index 293065001567..b85caa9eb368 100644
--- a/nixpkgs/nixos/tests/lxd-nftables.nix
+++ b/nixpkgs/nixos/tests/lxd/nftables.nix
@@ -5,7 +5,7 @@
 # iptables to nftables requires a full reboot, which is a bit hard inside NixOS
 # tests.
 
-import ./make-test-python.nix ({ pkgs, ...} : {
+import ../make-test-python.nix ({ pkgs, ...} : {
   name = "lxd-nftables";
 
   meta = with pkgs.lib.maintainers; {
diff --git a/nixpkgs/nixos/tests/lxd-ui.nix b/nixpkgs/nixos/tests/lxd/ui.nix
index 19eaa226c0bf..86cb30d8c2b6 100644
--- a/nixpkgs/nixos/tests/lxd-ui.nix
+++ b/nixpkgs/nixos/tests/lxd/ui.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, lib, ... }: {
+import ../make-test-python.nix ({ pkgs, lib, ... }: {
   name = "lxd-ui";
 
   meta = with pkgs.lib.maintainers; {
diff --git a/nixpkgs/nixos/tests/os-prober.nix b/nixpkgs/nixos/tests/os-prober.nix
index 22e720824c80..dae1306bd69d 100644
--- a/nixpkgs/nixos/tests/os-prober.nix
+++ b/nixpkgs/nixos/tests/os-prober.nix
@@ -76,6 +76,7 @@ in {
       # nixos-rebuild needs must be included in the VM.
       system.extraDependencies = with pkgs;
         [
+          bintools
           brotli
           brotli.dev
           brotli.lib
diff --git a/nixpkgs/nixos/tests/virtualbox.nix b/nixpkgs/nixos/tests/virtualbox.nix
index 062b125eb611..e522d0679e15 100644
--- a/nixpkgs/nixos/tests/virtualbox.nix
+++ b/nixpkgs/nixos/tests/virtualbox.nix
@@ -519,4 +519,4 @@ in mapAttrs (mkVBoxTest false vboxVMs) {
     destroy_vm_test1()
     destroy_vm_test2()
   '';
-} // (lib.optionalAttrs enableUnfree unfreeTests)
+} // (optionalAttrs enableUnfree unfreeTests)
diff --git a/nixpkgs/nixos/tests/web-apps/netbox-upgrade.nix b/nixpkgs/nixos/tests/web-apps/netbox-upgrade.nix
new file mode 100644
index 000000000000..602cf8d889d4
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/netbox-upgrade.nix
@@ -0,0 +1,85 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }: let
+  oldNetbox = pkgs.netbox_3_3;
+in {
+  name = "netbox-upgrade";
+
+  meta = with lib.maintainers; {
+    maintainers = [ minijackson ];
+  };
+
+  nodes.machine = { config, ... }: {
+    services.netbox = {
+      enable = true;
+      package = oldNetbox;
+      secretKeyFile = pkgs.writeText "secret" ''
+        abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
+      '';
+    };
+
+    services.nginx = {
+      enable = true;
+
+      recommendedProxySettings = true;
+
+      virtualHosts.netbox = {
+        default = true;
+        locations."/".proxyPass = "http://localhost:${toString config.services.netbox.port}";
+        locations."/static/".alias = "/var/lib/netbox/static/";
+      };
+    };
+
+    users.users.nginx.extraGroups = [ "netbox" ];
+
+    networking.firewall.allowedTCPPorts = [ 80 ];
+
+    specialisation.upgrade.configuration.services.netbox.package = lib.mkForce pkgs.netbox;
+  };
+
+  testScript = { nodes, ... }:
+    let
+      apiVersion = version: lib.pipe version [
+        (lib.splitString ".")
+        (lib.take 2)
+        (lib.concatStringsSep ".")
+      ];
+      oldApiVersion = apiVersion oldNetbox.version;
+      newApiVersion = apiVersion pkgs.netbox.version;
+    in
+    ''
+      start_all()
+      machine.wait_for_unit("netbox.target")
+      machine.wait_for_unit("nginx.service")
+      machine.wait_until_succeeds("journalctl --since -1m --unit netbox --grep Listening")
+
+      def api_version(headers):
+          header = [header for header in headers.splitlines() if header.startswith("API-Version:")][0]
+          return header.split()[1]
+
+      def check_api_version(version):
+          headers = machine.succeed(
+            "curl -sSfL http://localhost/api/ --head -H 'Content-Type: application/json'"
+          )
+          assert api_version(headers) == version
+
+      with subtest("NetBox version is the old one"):
+          check_api_version("${oldApiVersion}")
+
+      # Somehow, even though netbox-housekeeping.service has After=netbox.service,
+      # netbox-housekeeping.service and netbox.service still get started at the
+      # same time, making netbox-housekeeping fail (it cannot do its housekeeping
+      # job while the database is not yet fully set up).
+      #
+      # So instead of checking that the upgrade itself went well, we only check
+      # that netbox.service is active, and that netbox-housekeeping can be run
+      # successfully afterwards.
+      #
+      # This is not good UX, but the system should be working nonetheless.
+      machine.execute("${nodes.machine.system.build.toplevel}/specialisation/upgrade/bin/switch-to-configuration test >&2")
+
+      machine.wait_for_unit("netbox.service")
+      machine.succeed("systemctl start netbox-housekeeping.service")
+
+      with subtest("NetBox version is the new one"):
+          check_api_version("${newApiVersion}")
+    '';
+})
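
The upgrade mechanism this test relies on is generic: the base configuration pins the old package and a specialisation overrides it with the new one, so a single system closure contains both versions. A sketch of the pattern for an arbitrary service, where mysvc and the two package attributes are placeholders:

    { lib, pkgs, ... }: {
      # Boot into the old version by default.
      services.mysvc.package = pkgs.mysvc_old;

      # Switching into this specialisation performs the upgrade in place, e.g.:
      #   /run/current-system/specialisation/upgrade/bin/switch-to-configuration test
      specialisation.upgrade.configuration = {
        services.mysvc.package = lib.mkForce pkgs.mysvc_new;
      };
    }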