Diffstat (limited to 'nixpkgs/nixos/modules/services')
-rw-r--r--  nixpkgs/nixos/modules/services/backup/restic.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/editors/emacs.md | 24
-rw-r--r--  nixpkgs/nixos/modules/services/finance/odoo.nix | 8
-rw-r--r--  nixpkgs/nixos/modules/services/misc/paperless.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix | 7
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix | 60
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix | 19
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix | 84
-rw-r--r--  nixpkgs/nixos/modules/services/monitoring/vmagent.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/services/networking/firewall-nftables.nix | 9
-rw-r--r--  nixpkgs/nixos/modules/services/networking/nat-nftables.nix | 36
-rw-r--r--  nixpkgs/nixos/modules/services/networking/nftables.nix | 156
-rw-r--r--  nixpkgs/nixos/modules/services/networking/nncp.nix | 131
-rw-r--r--  nixpkgs/nixos/modules/services/networking/privoxy.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/security/kanidm.nix | 6
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/cloudlog.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/honk.md | 23
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/honk.nix | 153
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/lemmy.nix | 2
-rw-r--r--  nixpkgs/nixos/modules/services/web-apps/netbox.nix | 12
-rw-r--r--  nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix | 40
-rw-r--r--  nixpkgs/nixos/modules/services/x11/display-managers/sddm.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/x11/window-managers/default.nix | 1
-rw-r--r--  nixpkgs/nixos/modules/services/x11/window-managers/ragnarwm.nix | 33
24 files changed, 700 insertions, 125 deletions
diff --git a/nixpkgs/nixos/modules/services/backup/restic.nix b/nixpkgs/nixos/modules/services/backup/restic.nix
index 1620770e5b56..6f4cbab81726 100644
--- a/nixpkgs/nixos/modules/services/backup/restic.nix
+++ b/nixpkgs/nixos/modules/services/backup/restic.nix
@@ -333,6 +333,8 @@ in
               backup.rcloneConfig);
             path = [ pkgs.openssh ];
             restartIfChanged = false;
+            wants = [ "network-online.target" ];
+            after = [ "network-online.target" ];
             serviceConfig = {
               Type = "oneshot";
               ExecStart = (optionals (backupPaths != "") [ "${resticCmd} backup ${concatStringsSep " " (backup.extraBackupArgs ++ excludeFlags)} ${backupPaths}" ])
diff --git a/nixpkgs/nixos/modules/services/editors/emacs.md b/nixpkgs/nixos/modules/services/editors/emacs.md
index 72364b295144..9db1bd594175 100644
--- a/nixpkgs/nixos/modules/services/editors/emacs.md
+++ b/nixpkgs/nixos/modules/services/editors/emacs.md
@@ -286,11 +286,11 @@ The server should now be ready to serve Emacs clients.
 
 ### Starting the client {#module-services-emacs-starting-client}
 
-Ensure that the emacs server is enabled, either by customizing the
+Ensure that the Emacs server is enabled, either by customizing the
 {var}`server-mode` variable, or by adding
 `(server-start)` to {file}`~/.emacs`.
 
-To connect to the emacs daemon, run one of the following:
+To connect to the Emacs daemon, run one of the following:
 ```
 emacsclient FILENAME
 emacsclient --create-frame  # opens a new frame (window)
@@ -339,24 +339,10 @@ This will add the symlink
 
 ## Configuring Emacs {#module-services-emacs-configuring}
 
-The Emacs init file should be changed to load the extension packages at
-startup:
+If you want to only use extension packages from Nixpkgs, you can add
+`(setq package-archives nil)` to your init file.
 
-::: {.example #module-services-emacs-package-initialisation}
-### Package initialization in `.emacs`
-
-```
-(require 'package)
-
-;; optional. makes unpure packages archives unavailable
-(setq package-archives nil)
-
-(setq package-enable-at-startup nil)
-(package-initialize)
-```
-:::
-
-After the declarative emacs package configuration has been tested,
+After the declarative Emacs package configuration has been tested,
 previously downloaded packages can be cleaned up by removing
 {file}`~/.emacs.d/elpa` (do make a backup first, in case you
 forgot a package).
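For illustration only (not part of this change): a minimal sketch of the declarative Emacs package configuration the passage above refers to, with extension packages coming from Nixpkgs; the chosen packages are placeholders.

```nix
{ pkgs, ... }:
{
  services.emacs = {
    enable = true;
    # Extensions come from Nixpkgs, so `(setq package-archives nil)` in the
    # init file keeps package.el from fetching anything impure.
    package = pkgs.emacs.pkgs.withPackages (epkgs: [
      epkgs.magit
      epkgs.nix-mode
    ]);
  };
}
```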
diff --git a/nixpkgs/nixos/modules/services/finance/odoo.nix b/nixpkgs/nixos/modules/services/finance/odoo.nix
index fee9af574b5d..eec7c4e30cc4 100644
--- a/nixpkgs/nixos/modules/services/finance/odoo.nix
+++ b/nixpkgs/nixos/modules/services/finance/odoo.nix
@@ -31,6 +31,12 @@ in
         description = lib.mdDoc ''
           Odoo configuration settings. For more details see <https://www.odoo.com/documentation/15.0/administration/install/deploy.html>
         '';
+        example = literalExpression ''
+          options = {
+            db_user = "odoo";
+            db_password = "odoo";
+          };
+        '';
       };
 
       domain = mkOption {
@@ -112,11 +118,11 @@ in
     services.postgresql = {
       enable = true;
 
+      ensureDatabases = [ "odoo" ];
       ensureUsers = [{
         name = "odoo";
         ensurePermissions = { "DATABASE odoo" = "ALL PRIVILEGES"; };
       }];
-      ensureDatabases = [ "odoo" ];
     };
   });
 }
diff --git a/nixpkgs/nixos/modules/services/misc/paperless.nix b/nixpkgs/nixos/modules/services/misc/paperless.nix
index 0683a1f922ab..74a3b49ac9a6 100644
--- a/nixpkgs/nixos/modules/services/misc/paperless.nix
+++ b/nixpkgs/nixos/modules/services/misc/paperless.nix
@@ -43,6 +43,8 @@ let
       "-/etc/nsswitch.conf"
       "-/etc/hosts"
       "-/etc/localtime"
+      "-/etc/ssl/certs"
+      "-/etc/static/ssl/certs"
       "-/run/postgresql"
     ] ++ (optional enableRedis redisServer.unixSocket);
     BindPaths = [
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix
index f5b97c51186a..8bb017894ee2 100644
--- a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -50,6 +50,7 @@ let
     "mikrotik"
     "minio"
     "modemmanager"
+    "mysqld"
     "nextcloud"
     "nginx"
     "nginxlog"
@@ -297,6 +298,12 @@ in
           or 'services.prometheus.exporters.mail.configFile'.
       '';
     } {
+      assertion = cfg.mysqld.runAsLocalSuperUser -> config.services.mysql.enable;
+      message = ''
+        The exporter is configured to run as 'services.mysql.user', but
+        'services.mysql.enable' is set to false.
+      '';
+    } {
       assertion = cfg.sql.enable -> (
         (cfg.sql.configFile == null) != (cfg.sql.configuration == null)
       );
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix
new file mode 100644
index 000000000000..849c514de681
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, options }:
+let
+  cfg = config.services.prometheus.exporters.mysqld;
+  inherit (lib) types mkOption mdDoc mkIf mkForce cli concatStringsSep optionalString escapeShellArgs;
+in {
+  port = 9104;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+
+    runAsLocalSuperUser = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Whether to run the exporter as {option}`services.mysql.user`.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.path;
+      example = "/var/lib/prometheus-mysqld-exporter.cnf";
+      description = mdDoc ''
+        Path to the service's config file.
+
+        See <https://github.com/prometheus/mysqld_exporter#running> for more information about
+        the available options.
+
+        ::: {.warning}
+        Please do not store this file in the Nix store if you choose to include any credentials here,
+        as it would be world-readable.
+        :::
+      '';
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = !cfg.runAsLocalSuperUser;
+      User = mkIf cfg.runAsLocalSuperUser (mkForce config.services.mysql.user);
+      LoadCredential = mkIf (cfg.configFile != null) (mkForce ("config:" + cfg.configFile));
+      ExecStart = concatStringsSep " " [
+        "${pkgs.prometheus-mysqld-exporter}/bin/mysqld_exporter"
+        "--web.listen-address=${cfg.listenAddress}:${toString cfg.port}"
+        "--web.telemetry-path=${cfg.telemetryPath}"
+        (optionalString (cfg.configFile != null) ''--config.my-cnf=''${CREDENTIALS_DIRECTORY}/config'')
+        (escapeShellArgs cfg.extraFlags)
+      ];
+      RestrictAddressFamilies = [
+        # The exporter can be configured to talk to a local mysql server via a unix socket.
+        "AF_UNIX"
+      ];
+    };
+  };
+}
+
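For illustration only: a minimal sketch of how the new exporter might be enabled, assuming a local MariaDB instance; the config file path is a placeholder kept outside the Nix store.

```nix
{ pkgs, ... }:
{
  services.mysql = {
    enable = true;
    package = pkgs.mariadb;
  };

  services.prometheus.exporters.mysqld = {
    enable = true;
    # Reuse the MySQL superuser so the exporter can talk over the local unix
    # socket; requires services.mysql.enable (see the assertion above).
    runAsLocalSuperUser = true;
    # Loaded via systemd LoadCredential; keep it out of the Nix store.
    configFile = "/var/lib/prometheus-mysqld-exporter.cnf";
  };
}
```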
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
index 7808c8861a76..28add020f5cc 100644
--- a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
@@ -33,6 +33,15 @@ in
         Make sure that this file is readable by the exporter user.
       '';
     };
+    tokenFile = mkOption {
+      type = types.path;
+      example = "/path/to/token-file";
+      default = "";
+      description = lib.mdDoc ''
+        File containing the token for connecting to Nextcloud.
+        Make sure that this file is readable by the exporter user.
+      '';
+    };
     timeout = mkOption {
       type = types.str;
       default = "5s";
@@ -47,12 +56,14 @@ in
       ExecStart = ''
         ${pkgs.prometheus-nextcloud-exporter}/bin/nextcloud-exporter \
           --addr ${cfg.listenAddress}:${toString cfg.port} \
-          --username ${cfg.username} \
           --timeout ${cfg.timeout} \
           --server ${cfg.url} \
-          --password ${escapeShellArg "@${cfg.passwordFile}"} \
-          ${concatStringsSep " \\\n  " cfg.extraFlags}
-      '';
+          ${if cfg.tokenFile == "" then ''
+            --username ${cfg.username} \
+            --password ${escapeShellArg "@${cfg.passwordFile}"} \
+         '' else ''
+            --auth-token ${escapeShellArg "@${cfg.tokenFile}"} \
+         ''} ${concatStringsSep " \\\n  " cfg.extraFlags}'';
     };
   };
 }
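For illustration only: a sketch of the new token-based authentication path; the URL and secret path are placeholders.

```nix
{
  services.prometheus.exporters.nextcloud = {
    enable = true;
    url = "https://cloud.example.org";
    # With tokenFile set, --auth-token is passed instead of
    # --username/--password.
    tokenFile = "/run/secrets/nextcloud-exporter-token";
  };
}
```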
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix
index f52d92a73d5d..f2336429d42f 100644
--- a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix
@@ -1,4 +1,8 @@
-{ config, lib, pkgs, options }:
+{ config
+, lib
+, pkgs
+, options
+}:
 
 with lib;
 
@@ -6,17 +10,14 @@ let
   cfg = config.services.prometheus.exporters.unbound;
 in
 {
+  imports = [
+    (mkRemovedOptionModule [ "controlInterface" ] "This option was removed, use the `unbound.host` option instead.")
+    (mkRemovedOptionModule [ "fetchType" ] "This option was removed, use the `unbound.host` option instead.")
+    ({ options.warnings = options.warnings; options.assertions = options.assertions; })
+  ];
+
   port = 9167;
   extraOpts = {
-    fetchType = mkOption {
-      # TODO: add shm when upstream implemented it
-      type = types.enum [ "tcp" "uds" ];
-      default = "uds";
-      description = lib.mdDoc ''
-        Which methods the exporter uses to get the information from unbound.
-      '';
-    };
-
     telemetryPath = mkOption {
       type = types.str;
       default = "/metrics";
@@ -25,34 +26,65 @@ in
       '';
     };
 
-    controlInterface = mkOption {
-      type = types.nullOr types.str;
-      default = null;
-      example = "/run/unbound/unbound.socket";
-      description = lib.mdDoc ''
-        Path to the unbound socket for uds mode or the control interface port for tcp mode.
+    unbound = {
+      ca = mkOption {
+        type = types.nullOr types.path;
+        default = "/var/lib/unbound/unbound_server.pem";
+        example = null;
+        description = ''
+          Path to the Unbound server certificate authority.
+        '';
+      };
 
-        Example:
-          uds-mode: /run/unbound/unbound.socket
-          tcp-mode: 127.0.0.1:8953
-      '';
+      certificate = mkOption {
+        type = types.nullOr types.path;
+        default = "/var/lib/unbound/unbound_control.pem";
+        example = null;
+        description = ''
+          Path to the Unbound control socket certificate.
+        '';
+      };
+
+      key = mkOption {
+        type = types.nullOr types.path;
+        default = "/var/lib/unbound/unbound_control.key";
+        example = null;
+        description = ''
+          Path to the Unbound control socket key.
+        '';
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "tcp://127.0.0.1:8953";
+        example = "unix:///run/unbound/unbound.socket";
+        description = lib.mdDoc ''
+          Address of the Unbound control socket. Supports Unix domain sockets as well as the TCP interface.
+        '';
+      };
     };
   };
 
   serviceOpts = mkMerge ([{
     serviceConfig = {
+      User = "unbound"; # to access the unbound_control.key
       ExecStart = ''
-        ${pkgs.prometheus-unbound-exporter}/bin/unbound-telemetry \
-          ${cfg.fetchType} \
-          --bind ${cfg.listenAddress}:${toString cfg.port} \
-          --path ${cfg.telemetryPath} \
-          ${optionalString (cfg.controlInterface != null) "--control-interface ${cfg.controlInterface}"} \
+        ${pkgs.prometheus-unbound-exporter}/bin/unbound_exporter \
+          --unbound.host "${cfg.unbound.host}" \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          ${optionalString (cfg.unbound.ca != null) "--unbound.ca ${cfg.unbound.ca}"} \
+          ${optionalString (cfg.unbound.certificate != null) "--unbound.cert ${cfg.unbound.certificate}"} \
+          ${optionalString (cfg.unbound.key != null) "--unbound.key ${cfg.unbound.key}"} \
           ${toString cfg.extraFlags}
       '';
       RestrictAddressFamilies = [
-        # Need AF_UNIX to collect data
         "AF_UNIX"
+        "AF_INET"
+        "AF_INET6"
       ];
+    } // optionalAttrs (!config.services.unbound.enable) {
+      DynamicUser = true;
     };
   }] ++ [
     (mkIf config.services.unbound.enable {
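For illustration only: a sketch of the replacement for the removed fetchType/controlInterface options; the socket path matches the module's own example.

```nix
{
  services.prometheus.exporters.unbound = {
    enable = true;
    # A unix:// or tcp:// URI selects the transport to the Unbound control
    # interface.
    unbound.host = "unix:///run/unbound/unbound.socket";
  };
}
```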
diff --git a/nixpkgs/nixos/modules/services/monitoring/vmagent.nix b/nixpkgs/nixos/modules/services/monitoring/vmagent.nix
index c793bb073199..0e2ffb31c57c 100644
--- a/nixpkgs/nixos/modules/services/monitoring/vmagent.nix
+++ b/nixpkgs/nixos/modules/services/monitoring/vmagent.nix
@@ -62,6 +62,16 @@ in {
         Whether to open the firewall for the default ports.
       '';
     };
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra args to pass to `vmagent`. See the docs:
+        <https://docs.victoriametrics.com/vmagent.html#advanced-usage>
+        or {command}`vmagent -help` for more information.
+      '';
+    };
   };
 
   config = mkIf cfg.enable {
@@ -90,7 +100,7 @@ in {
         Type = "simple";
         Restart = "on-failure";
         WorkingDirectory = cfg.dataDir;
-        ExecStart = "${cfg.package}/bin/vmagent -remoteWrite.url=${cfg.remoteWriteUrl} -promscrape.config=${prometheusConfig}";
+        ExecStart = "${cfg.package}/bin/vmagent -remoteWrite.url=${cfg.remoteWriteUrl} -promscrape.config=${prometheusConfig} ${escapeShellArgs cfg.extraArgs}";
       };
     };
 
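For illustration only: a sketch of the new extraArgs option; the remote-write URL and the extra flag are placeholders (see `vmagent -help` for the actual flag set).

```nix
{
  services.vmagent = {
    enable = true;
    remoteWriteUrl = "http://127.0.0.1:8428/api/v1/write";
    # Appended verbatim (shell-escaped) after the generated flags.
    extraArgs = [ "-promscrape.suppressScrapeErrors" ];
  };
}
```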
diff --git a/nixpkgs/nixos/modules/services/networking/firewall-nftables.nix b/nixpkgs/nixos/modules/services/networking/firewall-nftables.nix
index 452dd97d89d2..7c7136cc96f1 100644
--- a/nixpkgs/nixos/modules/services/networking/firewall-nftables.nix
+++ b/nixpkgs/nixos/modules/services/networking/firewall-nftables.nix
@@ -70,10 +70,8 @@ in
       }
     ];
 
-    networking.nftables.ruleset = ''
-
-      table inet nixos-fw {
-
+    networking.nftables.tables."nixos-fw".family = "inet";
+    networking.nftables.tables."nixos-fw".content = ''
         ${optionalString (cfg.checkReversePath != false) ''
           chain rpfilter {
             type filter hook prerouting priority mangle + 10; policy drop;
@@ -169,9 +167,6 @@ in
 
           }
         ''}
-
-      }
-
     '';
 
   };
diff --git a/nixpkgs/nixos/modules/services/networking/nat-nftables.nix b/nixpkgs/nixos/modules/services/networking/nat-nftables.nix
index 483910a16658..4b2317ca2ffc 100644
--- a/nixpkgs/nixos/modules/services/networking/nat-nftables.nix
+++ b/nixpkgs/nixos/modules/services/networking/nat-nftables.nix
@@ -145,28 +145,28 @@ in
       }
     ];
 
-    networking.nftables.ruleset = ''
-      table ip nixos-nat {
-        ${mkTable {
+    networking.nftables.tables = {
+      "nixos-nat" = {
+        family = "ip";
+        content = mkTable {
           ipVer = "ip";
           inherit dest ipSet;
           forwardPorts = filter (x: !(isIPv6 x.destination)) cfg.forwardPorts;
           inherit (cfg) dmzHost;
-        }}
-      }
-
-      ${optionalString cfg.enableIPv6 ''
-        table ip6 nixos-nat {
-          ${mkTable {
-            ipVer = "ip6";
-            dest = destIPv6;
-            ipSet = ipv6Set;
-            forwardPorts = filter (x: isIPv6 x.destination) cfg.forwardPorts;
-            dmzHost = null;
-          }}
-        }
-      ''}
-    '';
+        };
+      };
+      "nixos-nat6" = mkIf cfg.enableIPv6 {
+        family = "ip6";
+        name = "nixos-nat";
+        content = mkTable {
+          ipVer = "ip6";
+          dest = destIPv6;
+          ipSet = ipv6Set;
+          forwardPorts = filter (x: isIPv6 x.destination) cfg.forwardPorts;
+          dmzHost = null;
+        };
+      };
+    };
 
     networking.firewall.extraForwardRules = optionalString config.networking.firewall.filterForward ''
       ${optionalString (ifaceSet != "") ''
diff --git a/nixpkgs/nixos/modules/services/networking/nftables.nix b/nixpkgs/nixos/modules/services/networking/nftables.nix
index faff1dca89ba..0e4cd6fa1503 100644
--- a/nixpkgs/nixos/modules/services/networking/nftables.nix
+++ b/nixpkgs/nixos/modules/services/networking/nftables.nix
@@ -2,6 +2,35 @@
 with lib;
 let
   cfg = config.networking.nftables;
+
+  tableSubmodule = { name, ... }: {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Enable this table.";
+      };
+
+      name = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Table name.";
+      };
+
+      content = mkOption {
+        type = types.lines;
+        description = lib.mdDoc "The table content.";
+      };
+
+      family = mkOption {
+        description = lib.mdDoc "Table family.";
+        type = types.enum [ "ip" "ip6" "inet" "arp" "bridge" "netdev" ];
+      };
+    };
+
+    config = {
+      name = mkDefault name;
+    };
+  };
 in
 {
   ###### interface
@@ -54,6 +83,24 @@ in
       '';
     };
 
+    networking.nftables.flushRuleset = mkEnableOption (lib.mdDoc "flushing the entire ruleset on each reload");
+
+    networking.nftables.extraDeletions = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        # this makes deleting a non-existing table a no-op instead of an error
+        table inet some-table;
+
+        delete table inet some-table;
+      '';
+      description =
+        lib.mdDoc ''
+          Extra deletion commands to be run on every firewall start, reload
+          and after stopping the firewall.
+        '';
+    };
+
     networking.nftables.ruleset = mkOption {
       type = types.lines;
       default = "";
@@ -103,7 +150,10 @@ in
         lib.mdDoc ''
           The ruleset to be used with nftables.  Should be in a format that
           can be loaded using "/bin/nft -f".  The ruleset is updated atomically.
-          This option conflicts with rulesetFile.
+          Note that if the tables need to be cleaned up first, one of the following is required:
+          - networking.nftables.flushRuleset = true; (flushes the entire ruleset),
+          - networking.nftables.extraDeletions with matching delete statements, or
+          - networking.nftables.tables, which cleans up its tables automatically.
         '';
     };
     networking.nftables.rulesetFile = mkOption {
@@ -113,9 +163,64 @@ in
         lib.mdDoc ''
           The ruleset file to be used with nftables.  Should be in a format that
           can be loaded using "nft -f".  The ruleset is updated atomically.
-          This option conflicts with ruleset and nftables based firewall.
         '';
     };
+    networking.nftables.tables = mkOption {
+      type = types.attrsOf (types.submodule tableSubmodule);
+
+      default = {};
+
+      description = lib.mdDoc ''
+        Tables to be added to ruleset.
+        Tables will be added together with delete statements to clean up the table before every update.
+      '';
+
+      example = {
+        filter = {
+          family = "inet";
+          content = ''
+            # Check out https://wiki.nftables.org/ for better documentation.
+            # Table for both IPv4 and IPv6.
+            # Block all incoming connections traffic except SSH and "ping".
+            chain input {
+              type filter hook input priority 0;
+
+              # accept any localhost traffic
+              iifname lo accept
+
+              # accept traffic originated from us
+              ct state {established, related} accept
+
+              # ICMP
+              # routers may also want: mld-listener-query, nd-router-solicit
+              ip6 nexthdr icmpv6 icmpv6 type { destination-unreachable, packet-too-big, time-exceeded, parameter-problem, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert } accept
+              ip protocol icmp icmp type { destination-unreachable, router-advertisement, time-exceeded, parameter-problem } accept
+
+              # allow "ping"
+              ip6 nexthdr icmpv6 icmpv6 type echo-request accept
+              ip protocol icmp icmp type echo-request accept
+
+              # accept SSH connections (required for a server)
+              tcp dport 22 accept
+
+              # count and drop any other traffic
+              counter drop
+            }
+
+            # Allow all outgoing connections.
+            chain output {
+              type filter hook output priority 0;
+              accept
+            }
+
+            chain forward {
+              type filter hook forward priority 0;
+              accept
+            }
+          '';
+        };
+      };
+    };
   };
 
   ###### implementation
@@ -124,6 +229,8 @@ in
     boot.blacklistedKernelModules = [ "ip_tables" ];
     environment.systemPackages = [ pkgs.nftables ];
     networking.networkmanager.firewallBackend = mkDefault "nftables";
+    # versionOlder for backportability, remove afterwards
+    networking.nftables.flushRuleset = mkDefault (versionOlder config.system.stateVersion "23.11" || (cfg.rulesetFile != null || cfg.ruleset != ""));
     systemd.services.nftables = {
       description = "nftables firewall";
       before = [ "network-pre.target" ];
@@ -131,18 +238,49 @@ in
       wantedBy = [ "multi-user.target" ];
       reloadIfChanged = true;
       serviceConfig = let
+        enabledTables = filterAttrs (_: table: table.enable) cfg.tables;
+        deletionsScript = pkgs.writeScript "nftables-deletions" ''
+          #! ${pkgs.nftables}/bin/nft -f
+          ${if cfg.flushRuleset then "flush ruleset"
+            else concatStringsSep "\n" (mapAttrsToList (_: table: ''
+              table ${table.family} ${table.name}
+              delete table ${table.family} ${table.name}
+            '') enabledTables)}
+          ${cfg.extraDeletions}
+        '';
+        deletionsScriptVar = "/var/lib/nftables/deletions.nft";
+        ensureDeletions = pkgs.writeShellScript "nftables-ensure-deletions" ''
+          touch ${deletionsScriptVar}
+          chmod +x ${deletionsScriptVar}
+        '';
+        saveDeletionsScript = pkgs.writeShellScript "nftables-save-deletions" ''
+          cp ${deletionsScript} ${deletionsScriptVar}
+        '';
+        cleanupDeletionsScript = pkgs.writeShellScript "nftables-cleanup-deletions" ''
+          rm ${deletionsScriptVar}
+        '';
         rulesScript = pkgs.writeTextFile {
           name =  "nftables-rules";
           executable = true;
           text = ''
             #! ${pkgs.nftables}/bin/nft -f
-            flush ruleset
-            ${if cfg.rulesetFile != null then ''
+            # previous deletions, if any
+            include "${deletionsScriptVar}"
+            # current deletions
+            include "${deletionsScript}"
+            ${concatStringsSep "\n" (mapAttrsToList (_: table: ''
+              table ${table.family} ${table.name} {
+                ${table.content}
+              }
+            '') enabledTables)}
+            ${cfg.ruleset}
+            ${lib.optionalString (cfg.rulesetFile != null) ''
               include "${cfg.rulesetFile}"
-            '' else cfg.ruleset}
+            ''}
           '';
           checkPhase = lib.optionalString cfg.checkRuleset ''
             cp $out ruleset.conf
+            sed 's|include "${deletionsScriptVar}"||' -i ruleset.conf
             ${cfg.preCheckRuleset}
             export NIX_REDIRECTS=/etc/protocols=${pkgs.buildPackages.iana-etc}/etc/protocols:/etc/services=${pkgs.buildPackages.iana-etc}/etc/services
             LD_PRELOAD="${pkgs.buildPackages.libredirect}/lib/libredirect.so ${pkgs.buildPackages.lklWithFirewall.lib}/lib/liblkl-hijack.so" \
@@ -152,9 +290,11 @@ in
       in {
         Type = "oneshot";
         RemainAfterExit = true;
-        ExecStart = rulesScript;
-        ExecReload = rulesScript;
-        ExecStop = "${pkgs.nftables}/bin/nft flush ruleset";
+        ExecStart = [ ensureDeletions rulesScript ];
+        ExecStartPost = saveDeletionsScript;
+        ExecReload = [ ensureDeletions rulesScript saveDeletionsScript ];
+        ExecStop = [ deletionsScriptVar cleanupDeletionsScript ];
+        StateDirectory = "nftables";
       };
     };
   };
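For illustration only: a sketch of how the new options are meant to compose, a declarative table that is cleaned up automatically plus extraDeletions for anything managed outside of `tables`; all names are illustrative.

```nix
{
  networking.nftables = {
    enable = true;
    # Declared tables are deleted and re-created atomically on every start
    # and reload via the generated deletions script.
    tables."my-filter" = {
      family = "inet";
      content = ''
        chain input {
          type filter hook input priority 0; policy drop;
          iifname lo accept
          ct state { established, related } accept
          tcp dport 22 accept
        }
      '';
    };
    # Tables defined elsewhere (e.g. in a raw ruleset) need explicit clean-up.
    extraDeletions = ''
      table ip my-legacy-table;
      delete table ip my-legacy-table;
    '';
  };
}
```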
diff --git a/nixpkgs/nixos/modules/services/networking/nncp.nix b/nixpkgs/nixos/modules/services/networking/nncp.nix
new file mode 100644
index 000000000000..3cfe41995e76
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nncp.nix
@@ -0,0 +1,131 @@
+{ config, lib, pkgs, ... }:
+with lib;
+
+let
+  nncpCfgFile = "/run/nncp.hjson";
+  programCfg = config.programs.nncp;
+  callerCfg = config.services.nncp.caller;
+  daemonCfg = config.services.nncp.daemon;
+  settingsFormat = pkgs.formats.json { };
+  jsonCfgFile = settingsFormat.generate "nncp.json" programCfg.settings;
+  pkg = programCfg.package;
+in {
+  options = {
+
+    services.nncp = {
+      caller = {
+        enable = mkEnableOption ''
+          cron'ed NNCP TCP daemon caller.
+          The daemon will take configuration from
+          [](#opt-programs.nncp.settings)
+        '';
+        extraArgs = mkOption {
+          type = with types; listOf str;
+          description = "Extra command-line arguments to pass to caller.";
+          default = [ ];
+          example = [ "-autotoss" ];
+        };
+      };
+
+      daemon = {
+        enable = mkEnableOption ''
+          NNCP TCP synchronization daemon.
+          The daemon will take configuration from
+          [](#opt-programs.nncp.settings)
+        '';
+        socketActivation = {
+          enable = mkEnableOption ''
+            socket activation of nncp-daemon instead of a persistent service.
+          '';
+          listenStreams = mkOption {
+            type = with types; listOf str;
+            description = lib.mdDoc ''
+              TCP sockets to bind to.
+              See [](#opt-systemd.sockets._name_.listenStreams).
+            '';
+            default = [ "5400" ];
+          };
+        };
+        extraArgs = mkOption {
+          type = with types; listOf str;
+          description = "Extra command-line arguments to pass to daemon.";
+          default = [ ];
+          example = [ "-autotoss" ];
+        };
+      };
+
+    };
+  };
+
+  config = mkIf (programCfg.enable or callerCfg.enable or daemonCfg.enable) {
+
+    assertions = [{
+      assertion = with builtins;
+        let
+          callerConfigured =
+            let neigh = config.programs.nncp.settings.neigh or { };
+            in lib.lists.any (x: hasAttr "calls" x && x.calls != [ ])
+            (attrValues neigh);
+        in !callerCfg.enable || callerConfigured;
+      message = "NNCP caller enabled but call configuration is missing";
+    }];
+
+    systemd.services."nncp-caller" = {
+      inherit (callerCfg) enable;
+      description = "Croned NNCP TCP daemon caller.";
+      documentation = [ "http://www.nncpgo.org/nncp_002dcaller.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkg}/bin/nncp-caller -noprogress -cfg "${nncpCfgFile}" ${
+            lib.strings.escapeShellArgs callerCfg.extraArgs
+          }'';
+        Group = "uucp";
+        UMask = "0002";
+      };
+    };
+
+    systemd.services."nncp-daemon" = mkIf daemonCfg.enable {
+      enable = !daemonCfg.socketActivation.enable;
+      description = "NNCP TCP syncronization daemon.";
+      documentation = [ "http://www.nncpgo.org/nncp_002ddaemon.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkg}/bin/nncp-daemon -noprogress -cfg "${nncpCfgFile}" ${
+            lib.strings.escapeShellArgs daemonCfg.extraArgs
+          }'';
+        Restart = "on-failure";
+        Group = "uucp";
+        UMask = "0002";
+      };
+    };
+
+    systemd.services."nncp-daemon@" = mkIf daemonCfg.socketActivation.enable {
+      description = "NNCP TCP syncronization daemon.";
+      documentation = [ "http://www.nncpgo.org/nncp_002ddaemon.html" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkg}/bin/nncp-daemon -noprogress -ucspi -cfg "${nncpCfgFile}" ${
+            lib.strings.escapeShellArgs daemonCfg.extraArgs
+          }'';
+        Group = "uucp";
+        UMask = "0002";
+        StandardInput = "socket";
+        StandardOutput = "inherit";
+        StandardError = "journal";
+      };
+    };
+
+    systemd.sockets.nncp-daemon = mkIf daemonCfg.socketActivation.enable {
+      inherit (daemonCfg.socketActivation) listenStreams;
+      description = "socket for NNCP TCP syncronization.";
+      conflicts = [ "nncp-daemon.service" ];
+      wantedBy = [ "sockets.target" ];
+      socketConfig.Accept = true;
+    };
+  };
+}
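For illustration only: a sketch of enabling the socket-activated daemon from the new module; node keys and neighbours under programs.nncp.settings are assumed to be configured elsewhere.

```nix
{
  # The daemon reads its configuration from the generated /run/nncp.hjson,
  # which is produced from programs.nncp.settings (not shown here).
  services.nncp.daemon = {
    enable = true;
    socketActivation = {
      enable = true;
      listenStreams = [ "5400" ];
    };
    extraArgs = [ "-autotoss" ];
  };
}
```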
diff --git a/nixpkgs/nixos/modules/services/networking/privoxy.nix b/nixpkgs/nixos/modules/services/networking/privoxy.nix
index 78d02aaa1125..619490a4c020 100644
--- a/nixpkgs/nixos/modules/services/networking/privoxy.nix
+++ b/nixpkgs/nixos/modules/services/networking/privoxy.nix
@@ -12,7 +12,7 @@ let
     else "${name} ${toString val}\n";
 
   configType = with types;
-    let atom = oneOf [ int bool string path ];
+    let atom = oneOf [ int bool str path ];
     in attrsOf (either atom (listOf atom))
     // { description = ''
           privoxy configuration type. The format consists of an attribute
diff --git a/nixpkgs/nixos/modules/services/security/kanidm.nix b/nixpkgs/nixos/modules/services/security/kanidm.nix
index d8a99dee59f4..6f4d1dc382ab 100644
--- a/nixpkgs/nixos/modules/services/security/kanidm.nix
+++ b/nixpkgs/nixos/modules/services/security/kanidm.nix
@@ -137,7 +137,7 @@ in
       default = { };
       description = lib.mdDoc ''
         Settings for Kanidm, see
-        [the documentation](https://github.com/kanidm/kanidm/blob/master/kanidm_book/src/server_configuration.md)
+        [the documentation](https://kanidm.github.io/kanidm/stable/server_configuration.html)
         and [example configuration](https://github.com/kanidm/kanidm/blob/master/examples/server.toml)
         for possible values.
       '';
@@ -155,7 +155,7 @@ in
       };
       description = lib.mdDoc ''
         Configure Kanidm clients, needed for the PAM daemon. See
-        [the documentation](https://github.com/kanidm/kanidm/blob/master/kanidm_book/src/client_tools.md#kanidm-configuration)
+        [the documentation](https://kanidm.github.io/kanidm/stable/client_tools.html#kanidm-configuration)
         and [example configuration](https://github.com/kanidm/kanidm/blob/master/examples/config)
         for possible values.
       '';
@@ -173,7 +173,7 @@ in
       };
       description = lib.mdDoc ''
         Configure Kanidm unix daemon.
-        See [the documentation](https://github.com/kanidm/kanidm/blob/master/kanidm_book/src/pam_and_nsswitch.md#the-unix-daemon)
+        See [the documentation](https://kanidm.github.io/kanidm/stable/integrations/pam_and_nsswitch.html#the-unix-daemon)
         and [example configuration](https://github.com/kanidm/kanidm/blob/master/examples/unixd)
         for possible values.
       '';
diff --git a/nixpkgs/nixos/modules/services/web-apps/cloudlog.nix b/nixpkgs/nixos/modules/services/web-apps/cloudlog.nix
index 9261de8d4354..da2cf93d7f1c 100644
--- a/nixpkgs/nixos/modules/services/web-apps/cloudlog.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/cloudlog.nix
@@ -308,8 +308,6 @@ in
       pools.cloudlog = {
         inherit (cfg) user;
         group = config.services.nginx.group;
-        # cloudlog is currently broken on php 8.2
-        phpPackage = pkgs.php81;
         settings =  {
           "listen.owner" = config.services.nginx.user;
           "listen.group" = config.services.nginx.group;
diff --git a/nixpkgs/nixos/modules/services/web-apps/honk.md b/nixpkgs/nixos/modules/services/web-apps/honk.md
new file mode 100644
index 000000000000..f34085f7dc52
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/honk.md
@@ -0,0 +1,23 @@
+# Honk {#module-services-honk}
+
+With Honk on NixOS you can quickly configure a complete ActivityPub server with
+minimal setup and support costs.
+
+## Basic usage {#module-services-honk-basic-usage}
+
+A minimal configuration looks like this:
+
+```nix
+{
+  services.honk = {
+    enable = true;
+    host = "0.0.0.0";
+    port = 8080;
+    username = "username";
+    passwordFile = "/etc/honk/password.txt";
+    servername = "honk.example.com";
+  };
+
+  networking.firewall.allowedTCPPorts = [ 8080 ];
+}
+```
diff --git a/nixpkgs/nixos/modules/services/web-apps/honk.nix b/nixpkgs/nixos/modules/services/web-apps/honk.nix
new file mode 100644
index 000000000000..e8718774575b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/honk.nix
@@ -0,0 +1,153 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+let
+  cfg = config.services.honk;
+
+  honk-initdb-script = cfg: pkgs.writeShellApplication {
+    name = "honk-initdb-script";
+
+    runtimeInputs = with pkgs; [ coreutils ];
+
+    text = ''
+      PW=$(cat "$CREDENTIALS_DIRECTORY/honk_passwordFile")
+
+      echo -e "${cfg.username}\n''$PW\n${cfg.host}:${toString cfg.port}\n${cfg.servername}" | ${lib.getExe cfg.package} -datadir "$STATE_DIRECTORY" init
+    '';
+  };
+in
+{
+  options = {
+    services.honk = {
+      enable = lib.mkEnableOption (lib.mdDoc "the Honk server");
+      package = lib.mkPackageOptionMD pkgs "honk" { };
+
+      host = lib.mkOption {
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          The host name or IP address the server should listen to.
+        '';
+        type = lib.types.str;
+      };
+
+      port = lib.mkOption {
+        default = 8080;
+        description = lib.mdDoc ''
+          The port the server should listen to.
+        '';
+        type = lib.types.port;
+      };
+
+      username = lib.mkOption {
+        description = lib.mdDoc ''
+          The admin account username.
+        '';
+        type = lib.types.str;
+      };
+
+      passwordFile = lib.mkOption {
+        description = lib.mdDoc ''
+          Path to a file containing the admin account password.
+          NOTE: Should not point into the Nix store, to prevent the password from being world-readable.
+        '';
+        type = lib.types.path;
+      };
+
+      servername = lib.mkOption {
+        description = lib.mdDoc ''
+          The server name.
+        '';
+        type = lib.types.str;
+      };
+
+      extraJS = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          An extra JavaScript file to be loaded by the client.
+        '';
+        type = lib.types.nullOr lib.types.path;
+      };
+
+      extraCSS = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          An extra CSS file to be loaded by the client.
+        '';
+        type = lib.types.nullOr lib.types.path;
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.username or "" != "";
+        message = ''
+          You have to define a username for Honk (`services.honk.username`).
+        '';
+      }
+      {
+        assertion = cfg.servername or "" != "";
+        message = ''
+          You have to define a servername for Honk (`services.honk.servername`).
+        '';
+      }
+    ];
+
+    systemd.services.honk-initdb = {
+      description = "Honk server database setup";
+      requiredBy = [ "honk.service" ];
+      before = [ "honk.service" ];
+
+      serviceConfig = {
+        LoadCredential = [
+          "honk_passwordFile:${cfg.passwordFile}"
+        ];
+        Type = "oneshot";
+        StateDirectory = "honk";
+        DynamicUser = true;
+        RemainAfterExit = true;
+        ExecStart = lib.getExe (honk-initdb-script cfg);
+        PrivateTmp = true;
+      };
+
+      unitConfig = {
+        ConditionPathExists = [
+          # Skip this service if the database already exists
+          "!$STATE_DIRECTORY/honk.db"
+        ];
+      };
+    };
+
+    systemd.services.honk = {
+      description = "Honk server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      bindsTo = [ "honk-initdb.service" ];
+      preStart = ''
+        mkdir -p $STATE_DIRECTORY/views
+        ${lib.optionalString (cfg.extraJS != null) "ln -fs ${cfg.extraJS} $STATE_DIRECTORY/views/local.js"}
+        ${lib.optionalString (cfg.extraCSS != null) "ln -fs ${cfg.extraCSS} $STATE_DIRECTORY/views/local.css"}
+        ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk backup $STATE_DIRECTORY/backup
+        ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk upgrade
+        ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk cleanup
+      '';
+      serviceConfig = {
+        ExecStart = ''
+          ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk
+        '';
+        StateDirectory = "honk";
+        DynamicUser = true;
+        PrivateTmp = "yes";
+        Restart = "on-failure";
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ drupol ];
+    doc = ./honk.md;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/lemmy.nix b/nixpkgs/nixos/modules/services/web-apps/lemmy.nix
index 895f3a9f1b4b..20d9dcb7c266 100644
--- a/nixpkgs/nixos/modules/services/web-apps/lemmy.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/lemmy.nix
@@ -160,7 +160,7 @@ in
               root * ${cfg.ui.package}/dist
               file_server
             }
-            handle_path /static/undefined/* {
+            handle_path /static/${cfg.ui.package.passthru.commit_sha}/* {
               root * ${cfg.ui.package}/dist
               file_server
             }
diff --git a/nixpkgs/nixos/modules/services/web-apps/netbox.nix b/nixpkgs/nixos/modules/services/web-apps/netbox.nix
index 5f42f42a9af9..6d89ffc2a7b7 100644
--- a/nixpkgs/nixos/modules/services/web-apps/netbox.nix
+++ b/nixpkgs/nixos/modules/services/web-apps/netbox.nix
@@ -169,6 +169,13 @@ in {
         AUTH_LDAP_FIND_GROUP_PERMS = True
       '';
     };
+    keycloakClientSecret = lib.mkOption {
+      type = with lib.types; nullOr path;
+      default = null;
+      description = lib.mdDoc ''
+        File that contains the Keycloak client secret.
+      '';
+    };
   };
 
   config = lib.mkIf cfg.enable {
@@ -227,7 +234,10 @@ in {
       extraConfig = ''
         with open("${cfg.secretKeyFile}", "r") as file:
             SECRET_KEY = file.readline()
-      '';
+      '' + (lib.optionalString (cfg.keycloakClientSecret != null) ''
+        with open("${cfg.keycloakClientSecret}", "r") as file:
+            SOCIAL_AUTH_KEYCLOAK_SECRET = file.readline()
+      '');
     };
 
     services.redis.servers.netbox.enable = true;
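For illustration only: a sketch of the new keycloakClientSecret option; both secret paths are placeholders, and the remaining SOCIAL_AUTH_KEYCLOAK_* settings are assumed to live in extraConfig.

```nix
{
  services.netbox = {
    enable = true;
    secretKeyFile = "/run/secrets/netbox-secret-key";
    # Read at startup and exported as SOCIAL_AUTH_KEYCLOAK_SECRET.
    keycloakClientSecret = "/run/secrets/netbox-keycloak-client-secret";
  };
}
```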
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix b/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix
index 676d08b93e2c..e6923bcbb56c 100644
--- a/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix
@@ -231,40 +231,14 @@ in
 
     systemd.user.services.dbus.wantedBy = [ "default.target" ];
 
-    programs.dconf.profiles.gdm =
-    let
-      customDconf = pkgs.writeTextFile {
-        name = "gdm-dconf";
-        destination = "/dconf/gdm-custom";
-        text = ''
-          ${optionalString (!cfg.gdm.autoSuspend) ''
-            [org/gnome/settings-daemon/plugins/power]
-            sleep-inactive-ac-type='nothing'
-            sleep-inactive-battery-type='nothing'
-            sleep-inactive-ac-timeout=0
-            sleep-inactive-battery-timeout=0
-          ''}
-        '';
-      };
-
-      customDconfDb = pkgs.stdenv.mkDerivation {
-        name = "gdm-dconf-db";
-        buildCommand = ''
-          ${pkgs.dconf}/bin/dconf compile $out ${customDconf}/dconf
-        '';
+    programs.dconf.profiles.gdm.databases = lib.optionals (!cfg.gdm.autoSuspend) [{
+      settings."org/gnome/settings-daemon/plugins/power" = {
+        sleep-inactive-ac-type = "nothing";
+        sleep-inactive-battery-type = "nothing";
+        sleep-inactive-ac-timeout = lib.gvariant.mkInt32 0;
+        sleep-inactive-battery-timeout = lib.gvariant.mkInt32 0;
       };
-    in pkgs.stdenv.mkDerivation {
-      name = "dconf-gdm-profile";
-      buildCommand = ''
-        # Check that the GDM profile starts with what we expect.
-        if [ $(head -n 1 ${gdm}/share/dconf/profile/gdm) != "user-db:user" ]; then
-          echo "GDM dconf profile changed, please update gdm.nix"
-          exit 1
-        fi
-        # Insert our custom DB behind it.
-        sed '2ifile-db:${customDconfDb}' ${gdm}/share/dconf/profile/gdm > $out
-      '';
-    };
+    }] ++ [ "${gdm}/share/gdm/greeter-dconf-defaults" ];
 
     # Use AutomaticLogin if delay is zero, because it's immediate.
     # Otherwise with TimedLogin with zero seconds the prompt is still
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/sddm.nix b/nixpkgs/nixos/modules/services/x11/display-managers/sddm.nix
index c04edd0d4b7a..47e60236eaeb 100644
--- a/nixpkgs/nixos/modules/services/x11/display-managers/sddm.nix
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/sddm.nix
@@ -267,6 +267,7 @@ in
 
     environment.systemPackages = [ sddm ];
     services.dbus.packages = [ sddm ];
+    systemd.tmpfiles.packages = [ sddm ];
 
     # We're not using the upstream unit, so copy these: https://github.com/sddm/sddm/blob/develop/services/sddm.service.in
     systemd.services.display-manager.after = [
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/default.nix b/nixpkgs/nixos/modules/services/x11/window-managers/default.nix
index ce1d4115f225..e180f2693e0c 100644
--- a/nixpkgs/nixos/modules/services/x11/window-managers/default.nix
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/default.nix
@@ -35,6 +35,7 @@ in
     ./openbox.nix
     ./pekwm.nix
     ./notion.nix
+    ./ragnarwm.nix
     ./ratpoison.nix
     ./sawfish.nix
     ./smallwm.nix
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/ragnarwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/ragnarwm.nix
new file mode 100644
index 000000000000..0843b872dba5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/ragnarwm.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.ragnarwm;
+in
+{
+  ###### interface
+
+  options = {
+    services.xserver.windowManager.ragnarwm = {
+      enable = mkEnableOption (lib.mdDoc "ragnarwm");
+      package = mkOption {
+        type = types.package;
+        default = pkgs.ragnarwm;
+        defaultText = literalExpression "pkgs.ragnarwm";
+        description = lib.mdDoc ''
+          The ragnarwm package to use.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    services.xserver.displayManager.sessionPackages = [ cfg.package ];
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ sigmanificient ];
+}
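For illustration only: a sketch of enabling the new window manager module; the display manager is an arbitrary choice.

```nix
{
  services.xserver = {
    enable = true;
    windowManager.ragnarwm.enable = true;
    # Any display manager that honours sessionPackages works; lightdm is
    # just an example.
    displayManager.lightdm.enable = true;
  };
}
```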